path: root/lib/Target/X86
Diffstat (limited to 'lib/Target/X86')
-rw-r--r--  lib/Target/X86/AsmParser/Makefile | 15
-rw-r--r--  lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp | 4
-rw-r--r--  lib/Target/X86/AsmParser/X86AsmParser.cpp | 366
-rw-r--r--  lib/Target/X86/AsmParser/X86AsmParserCommon.h | 2
-rw-r--r--  lib/Target/X86/AsmParser/X86Operand.h | 61
-rw-r--r--  lib/Target/X86/CMakeLists.txt | 7
-rw-r--r--  lib/Target/X86/Disassembler/Makefile | 18
-rw-r--r--  lib/Target/X86/Disassembler/X86Disassembler.cpp | 108
-rw-r--r--  lib/Target/X86/Disassembler/X86Disassembler.h | 112
-rw-r--r--  lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp | 24
-rw-r--r--  lib/Target/X86/Disassembler/X86DisassemblerDecoder.h | 7
-rw-r--r--  lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h | 10
-rw-r--r--  lib/Target/X86/InstPrinter/Makefile | 15
-rw-r--r--  lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp | 23
-rw-r--r--  lib/Target/X86/InstPrinter/X86InstComments.cpp | 837
-rw-r--r--  lib/Target/X86/MCTargetDesc/CMakeLists.txt | 2
-rw-r--r--  lib/Target/X86/MCTargetDesc/Makefile | 16
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp | 114
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86BaseInfo.h | 31
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp | 75
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp | 141
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86FixupKinds.h | 6
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp | 2
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp | 618
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp | 101
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h | 10
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp | 119
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp | 7
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp | 8
-rw-r--r--  lib/Target/X86/Makefile | 23
-rw-r--r--  lib/Target/X86/README-X86-64.txt | 2
-rw-r--r--  lib/Target/X86/README.txt | 6
-rw-r--r--  lib/Target/X86/TargetInfo/Makefile | 16
-rw-r--r--  lib/Target/X86/Utils/Makefile | 15
-rw-r--r--  lib/Target/X86/Utils/X86ShuffleDecode.cpp | 162
-rw-r--r--  lib/Target/X86/Utils/X86ShuffleDecode.h | 94
-rw-r--r--  lib/Target/X86/X86.h | 15
-rw-r--r--  lib/Target/X86/X86.td | 395
-rw-r--r--  lib/Target/X86/X86AsmPrinter.cpp | 81
-rw-r--r--  lib/Target/X86/X86AsmPrinter.h | 59
-rw-r--r--  lib/Target/X86/X86CallFrameOptimization.cpp | 190
-rw-r--r--  lib/Target/X86/X86CallingConv.td | 60
-rw-r--r--  lib/Target/X86/X86ExpandPseudo.cpp | 82
-rw-r--r--  lib/Target/X86/X86FastISel.cpp | 332
-rw-r--r--  lib/Target/X86/X86FixupBWInsts.cpp | 371
-rw-r--r--  lib/Target/X86/X86FixupLEAs.cpp | 124
-rw-r--r--  lib/Target/X86/X86FixupSetCC.cpp | 186
-rw-r--r--  lib/Target/X86/X86FloatingPoint.cpp | 410
-rw-r--r--  lib/Target/X86/X86FrameLowering.cpp | 529
-rw-r--r--  lib/Target/X86/X86FrameLowering.h | 53
-rw-r--r--  lib/Target/X86/X86ISelDAGToDAG.cpp | 444
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp | 12158
-rw-r--r--  lib/Target/X86/X86ISelLowering.h | 306
-rw-r--r--  lib/Target/X86/X86InstrAVX512.td | 3106
-rw-r--r--  lib/Target/X86/X86InstrBuilder.h | 30
-rw-r--r--  lib/Target/X86/X86InstrCompiler.td | 216
-rw-r--r--  lib/Target/X86/X86InstrControl.td | 12
-rw-r--r--  lib/Target/X86/X86InstrFPStack.td | 10
-rw-r--r--  lib/Target/X86/X86InstrFormats.td | 2
-rw-r--r--  lib/Target/X86/X86InstrFragmentsSIMD.td | 244
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp | 2229
-rw-r--r--  lib/Target/X86/X86InstrInfo.h | 268
-rw-r--r--  lib/Target/X86/X86InstrInfo.td | 367
-rw-r--r--  lib/Target/X86/X86InstrMMX.td | 2
-rw-r--r--  lib/Target/X86/X86InstrMPX.td | 4
-rw-r--r--  lib/Target/X86/X86InstrSSE.td | 1473
-rw-r--r--  lib/Target/X86/X86InstrSystem.td | 40
-rw-r--r--  lib/Target/X86/X86InstrVMX.td | 6
-rw-r--r--  lib/Target/X86/X86InstrXOP.td | 214
-rw-r--r--  lib/Target/X86/X86IntrinsicsInfo.h | 928
-rw-r--r--  lib/Target/X86/X86MCInstLower.cpp | 635
-rw-r--r--  lib/Target/X86/X86MachineFunctionInfo.h | 13
-rw-r--r--  lib/Target/X86/X86OptimizeLEAs.cpp | 480
-rw-r--r--  lib/Target/X86/X86PadShortFunction.cpp | 14
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp | 52
-rw-r--r--  lib/Target/X86/X86RegisterInfo.td | 63
-rw-r--r--  lib/Target/X86/X86Schedule.td | 15
-rw-r--r--  lib/Target/X86/X86ScheduleAtom.td | 1
-rw-r--r--  lib/Target/X86/X86SelectionDAGInfo.cpp | 17
-rw-r--r--  lib/Target/X86/X86SelectionDAGInfo.h | 26
-rw-r--r--  lib/Target/X86/X86ShuffleDecodeConstantPool.cpp | 218
-rw-r--r--  lib/Target/X86/X86ShuffleDecodeConstantPool.h | 15
-rw-r--r--  lib/Target/X86/X86Subtarget.cpp | 175
-rw-r--r--  lib/Target/X86/X86Subtarget.h | 97
-rw-r--r--  lib/Target/X86/X86TargetMachine.cpp | 94
-rw-r--r--  lib/Target/X86/X86TargetMachine.h | 5
-rw-r--r--  lib/Target/X86/X86TargetObjectFile.cpp | 75
-rw-r--r--  lib/Target/X86/X86TargetObjectFile.h | 13
-rw-r--r--  lib/Target/X86/X86TargetTransformInfo.cpp | 405
-rw-r--r--  lib/Target/X86/X86TargetTransformInfo.h | 5
-rw-r--r--  lib/Target/X86/X86VZeroUpper.cpp | 88
-rw-r--r--  lib/Target/X86/X86WinAllocaExpander.cpp | 294
-rw-r--r--  lib/Target/X86/X86WinEHState.cpp | 464
93 files changed, 19031 insertions, 12356 deletions
diff --git a/lib/Target/X86/AsmParser/Makefile b/lib/Target/X86/AsmParser/Makefile
deleted file mode 100644
index f834dfc300a1b..0000000000000
--- a/lib/Target/X86/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/X86/AsmParser/Makefile -------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86AsmParser
-
-# Hack: we need to include 'main' X86 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
index 09cc53a8e6d3c..c38a7d1dd44df 100644
--- a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/X86BaseInfo.h"
#include "X86AsmInstrumentation.h"
+#include "MCTargetDesc/X86BaseInfo.h"
#include "X86Operand.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
@@ -18,9 +18,9 @@
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 4d8ffac1a82bb..4e0ad8bfe1f12 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -11,7 +11,6 @@
#include "X86AsmInstrumentation.h"
#include "X86AsmParserCommon.h"
#include "X86Operand.h"
-#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -24,12 +23,12 @@
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
@@ -683,9 +682,14 @@ private:
std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
- void AddDefaultSrcDestOperands(
- OperandVector& Operands, std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
- std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
+ bool IsSIReg(unsigned Reg);
+ unsigned GetSIDIForRegClass(unsigned RegClassID, unsigned Reg, bool IsSIReg);
+ void
+ AddDefaultSrcDestOperands(OperandVector &Operands,
+ std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
+ std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
+ bool VerifyAndAdjustOperands(OperandVector &OrigOperands,
+ OperandVector &FinalOperands);
std::unique_ptr<X86Operand> ParseOperand();
std::unique_ptr<X86Operand> ParseATTOperand();
std::unique_ptr<X86Operand> ParseIntelOperand();
@@ -747,11 +751,6 @@ private:
bool OmitRegisterFromClobberLists(unsigned RegNo) override;
- /// doSrcDstMatch - Returns true if operands are matching in their
- /// word size (%si and %di, %esi and %edi, etc.). Order depends on
- /// the parsing mode (Intel vs. AT&T).
- bool doSrcDstMatch(X86Operand &Op1, X86Operand &Op2);
-
/// Parses AVX512 specific operand primitives: masked registers ({%k<NUM>}, {z})
/// and memory broadcasting ({1to<NUM>}) primitives, updating Operands vector if required.
/// \return \c true if no parsing errors occurred, \c false otherwise.
@@ -867,27 +866,6 @@ static bool CheckBaseRegAndIndexReg(unsigned BaseReg, unsigned IndexReg,
return false;
}
-bool X86AsmParser::doSrcDstMatch(X86Operand &Op1, X86Operand &Op2)
-{
- // Return true and let a normal complaint about bogus operands happen.
- if (!Op1.isMem() || !Op2.isMem())
- return true;
-
- // Actually these might be the other way round if Intel syntax is
- // being used. It doesn't matter.
- unsigned diReg = Op1.Mem.BaseReg;
- unsigned siReg = Op2.Mem.BaseReg;
-
- if (X86MCRegisterClasses[X86::GR16RegClassID].contains(siReg))
- return X86MCRegisterClasses[X86::GR16RegClassID].contains(diReg);
- if (X86MCRegisterClasses[X86::GR32RegClassID].contains(siReg))
- return X86MCRegisterClasses[X86::GR32RegClassID].contains(diReg);
- if (X86MCRegisterClasses[X86::GR64RegClassID].contains(siReg))
- return X86MCRegisterClasses[X86::GR64RegClassID].contains(diReg);
- // Again, return true and let another error happen.
- return true;
-}
-
bool X86AsmParser::ParseRegister(unsigned &RegNo,
SMLoc &StartLoc, SMLoc &EndLoc) {
MCAsmParser &Parser = getParser();
@@ -929,10 +907,16 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo,
if (RegNo == X86::RIZ ||
X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
X86II::isX86_64NonExtLowByteReg(RegNo) ||
- X86II::isX86_64ExtendedReg(RegNo))
+ X86II::isX86_64ExtendedReg(RegNo) ||
+ X86II::is32ExtendedReg(RegNo))
return Error(StartLoc, "register %"
+ Tok.getString() + " is only available in 64-bit mode",
SMRange(StartLoc, EndLoc));
+ } else if (!getSTI().getFeatureBits()[X86::FeatureAVX512]) {
+ if (X86II::is32ExtendedReg(RegNo))
+ return Error(StartLoc, "register %"
+ + Tok.getString() + " is only available with AVX512",
+ SMRange(StartLoc, EndLoc));
}
// Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
@@ -1025,6 +1009,33 @@ std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
Loc, Loc, 0);
}
+bool X86AsmParser::IsSIReg(unsigned Reg) {
+ switch (Reg) {
+ default: llvm_unreachable("Only (R|E)SI and (R|E)DI are expected!");
+ case X86::RSI:
+ case X86::ESI:
+ case X86::SI:
+ return true;
+ case X86::RDI:
+ case X86::EDI:
+ case X86::DI:
+ return false;
+ }
+}
+
+unsigned X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, unsigned Reg,
+ bool IsSIReg) {
+ switch (RegClassID) {
+ default: llvm_unreachable("Unexpected register class");
+ case X86::GR64RegClassID:
+ return IsSIReg ? X86::RSI : X86::RDI;
+ case X86::GR32RegClassID:
+ return IsSIReg ? X86::ESI : X86::EDI;
+ case X86::GR16RegClassID:
+ return IsSIReg ? X86::SI : X86::DI;
+ }
+}
+
void X86AsmParser::AddDefaultSrcDestOperands(
OperandVector& Operands, std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
@@ -1038,6 +1049,88 @@ void X86AsmParser::AddDefaultSrcDestOperands(
}
}
+bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
+ OperandVector &FinalOperands) {
+
+ if (OrigOperands.size() > 1) {
+    // Check that the sizes match; OrigOperands also contains the instruction name
+ assert(OrigOperands.size() == FinalOperands.size() + 1 &&
+ "Operand size mismatch");
+
+ SmallVector<std::pair<SMLoc, std::string>, 2> Warnings;
+ // Verify types match
+ int RegClassID = -1;
+ for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
+ X86Operand &OrigOp = static_cast<X86Operand &>(*OrigOperands[i + 1]);
+ X86Operand &FinalOp = static_cast<X86Operand &>(*FinalOperands[i]);
+
+ if (FinalOp.isReg() &&
+ (!OrigOp.isReg() || FinalOp.getReg() != OrigOp.getReg()))
+ // Return false and let a normal complaint about bogus operands happen
+ return false;
+
+ if (FinalOp.isMem()) {
+
+ if (!OrigOp.isMem())
+ // Return false and let a normal complaint about bogus operands happen
+ return false;
+
+ unsigned OrigReg = OrigOp.Mem.BaseReg;
+ unsigned FinalReg = FinalOp.Mem.BaseReg;
+
+        // If we've already encountered a register class, make sure all register
+        // bases are of the same register class
+ if (RegClassID != -1 &&
+ !X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
+ return Error(OrigOp.getStartLoc(),
+ "mismatching source and destination index registers");
+ }
+
+ if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
+ RegClassID = X86::GR64RegClassID;
+ else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
+ RegClassID = X86::GR32RegClassID;
+ else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
+ RegClassID = X86::GR16RegClassID;
+ else
+ // Unexpected register class type
+ // Return false and let a normal complaint about bogus operands happen
+ return false;
+
+ bool IsSI = IsSIReg(FinalReg);
+ FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);
+
+ if (FinalReg != OrigReg) {
+ std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
+ Warnings.push_back(std::make_pair(
+ OrigOp.getStartLoc(),
+ "memory operand is only for determining the size, " + RegName +
+ " will be used for the location"));
+ }
+
+ FinalOp.Mem.Size = OrigOp.Mem.Size;
+ FinalOp.Mem.SegReg = OrigOp.Mem.SegReg;
+ FinalOp.Mem.BaseReg = FinalReg;
+ }
+ }
+
+    // Produce warnings only if all the operands passed the adjustment; this
+    // keeps legal cases like "movsd (%rax), %xmm0" from mistakenly producing
+    // warnings
+ for (auto &WarningMsg : Warnings) {
+ Warning(WarningMsg.first, WarningMsg.second);
+ }
+
+ // Remove old operands
+ for (unsigned int i = 0; i < FinalOperands.size(); ++i)
+ OrigOperands.pop_back();
+ }
+  // Append the adjusted operands
+ for (unsigned int i = 0; i < FinalOperands.size(); ++i)
+ OrigOperands.push_back(std::move(FinalOperands[i]));
+
+ return false;
+}
+
std::unique_ptr<X86Operand> X86AsmParser::ParseOperand() {
if (isParsingIntelSyntax())
return ParseIntelOperand();
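[Note on the hunk above] The new helpers replace doSrcDstMatch: instead of only checking that the two bases sit in the same register class, VerifyAndAdjustOperands also canonicalizes explicit string-instruction operands onto the implicit SI/DI bases and warns when it does. A minimal sketch of the observable behavior, assuming 32-bit mode and AT&T syntax (GNU inline asm; the diagnostics are the ones quoted in the code above):

    asm("movsl (%esi), (%edi)");  // bases already match the defaults: accepted
    asm("movsw (%ebx), (%ecx)");  // warns twice ("memory operand is only for
                                  // determining the size, ..."); rewritten to
                                  // use (R|E)SI/(R|E)DI for the location
    asm("movsl (%si), (%edi)");   // error: "mismatching source and destination
                                  // index registers" (GR16 vs. GR32 bases)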
@@ -1301,7 +1394,7 @@ X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
return ErrorOperand(BracLoc, "Expected '[' token!");
Parser.Lex(); // Eat '['
- SMLoc StartInBrac = Tok.getLoc();
+ SMLoc StartInBrac = Parser.getTok().getLoc();
// Parse [ Symbol + ImmDisp ] and [ BaseReg + Scale*IndexReg + ImmDisp ]. We
// may have already parsed an immediate displacement before the bracketed
// expression.
@@ -1330,7 +1423,10 @@ X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
// Parse struct field access. Intel requires a dot, but MSVC doesn't. MSVC
// will in fact do global lookup the field name inside all global typedefs,
// but we don't emulate that.
- if (Tok.getString().find('.') != StringRef::npos) {
+ if ((Parser.getTok().getKind() == AsmToken::Identifier ||
+ Parser.getTok().getKind() == AsmToken::Dot ||
+ Parser.getTok().getKind() == AsmToken::Real) &&
+ Parser.getTok().getString().find('.') != StringRef::npos) {
const MCExpr *NewDisp;
if (ParseIntelDotOperator(Disp, NewDisp))
return nullptr;
@@ -2087,22 +2183,36 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
unsigned ComparisonCode = StringSwitch<unsigned>(
PatchedName.slice(CCIdx, PatchedName.size() - 2))
.Case("eq", 0x00)
+ .Case("eq_oq", 0x00)
.Case("lt", 0x01)
+ .Case("lt_os", 0x01)
.Case("le", 0x02)
+ .Case("le_os", 0x02)
.Case("unord", 0x03)
+ .Case("unord_q", 0x03)
.Case("neq", 0x04)
+ .Case("neq_uq", 0x04)
.Case("nlt", 0x05)
+ .Case("nlt_us", 0x05)
.Case("nle", 0x06)
+ .Case("nle_us", 0x06)
.Case("ord", 0x07)
+ .Case("ord_q", 0x07)
/* AVX only from here */
.Case("eq_uq", 0x08)
.Case("nge", 0x09)
+ .Case("nge_us", 0x09)
.Case("ngt", 0x0A)
+ .Case("ngt_us", 0x0A)
.Case("false", 0x0B)
+ .Case("false_oq", 0x0B)
.Case("neq_oq", 0x0C)
.Case("ge", 0x0D)
+ .Case("ge_os", 0x0D)
.Case("gt", 0x0E)
+ .Case("gt_os", 0x0E)
.Case("true", 0x0F)
+ .Case("true_uq", 0x0F)
.Case("eq_os", 0x10)
.Case("lt_oq", 0x11)
.Case("le_oq", 0x12)
@@ -2196,6 +2306,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Name == "repne" || Name == "repnz" ||
Name == "rex64" || Name == "data16";
+ bool CurlyAsEndOfStatement = false;
// This does the actual operand parsing. Don't parse any more if we have a
// prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
// just want to parse the "lock" as the first instruction and the "incl" as
@@ -2223,7 +2334,12 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
break;
}
- if (getLexer().isNot(AsmToken::EndOfStatement))
+    // In MS inline asm curly braces mark the beginning/end of a block,
+    // therefore they should be interpreted as an end of statement
+ CurlyAsEndOfStatement =
+ isParsingIntelSyntax() && isParsingInlineAsm() &&
+ (getLexer().is(AsmToken::LCurly) || getLexer().is(AsmToken::RCurly));
+ if (getLexer().isNot(AsmToken::EndOfStatement) && !CurlyAsEndOfStatement)
return ErrorAndEatStatement(getLexer().getLoc(),
"unexpected token in argument list");
}
@@ -2232,6 +2348,10 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
if (getLexer().is(AsmToken::EndOfStatement) ||
(isPrefix && getLexer().is(AsmToken::Slash)))
Parser.Lex();
+ else if (CurlyAsEndOfStatement)
+ // Add an actual EndOfStatement before the curly brace
+ Info.AsmRewrites->emplace_back(AOK_EndOfStatement,
+ getLexer().getTok().getLoc(), 0);
// This is for gas compatibility and cannot be done in td.
// Adding "p" for some floating point with no argument.
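[Note on the hunk above] The CurlyAsEndOfStatement path only fires for Intel-syntax MS inline assembly, where the braces delimit the asm block itself. A hypothetical MSVC-style C++ source that this change makes parseable (sketch):

    void set_flag() {
      __asm { mov eax, 1 }   // '}' right after the operands now terminates the
                             // statement; previously a newline was required
      __asm {
        xor eax, eax
        inc eax }            // same: the closing brace acts as end-of-statement
    }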
@@ -2247,10 +2367,11 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
static_cast<X86Operand &>(*Operands[0]).setTokenValue(Repl);
}
- // This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
+ // This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" ->
// "outb %al, %dx". Out doesn't take a memory form, but this is a widely
// documented form in various unofficial manuals, so a lot of code uses it.
- if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
+ if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||
+ Name == "outl" || Name == "outsl" || Name == "out" || Name == "outs") &&
Operands.size() == 3) {
X86Operand &Op = (X86Operand &)*Operands.back();
if (Op.isMem() && Op.Mem.SegReg == 0 &&
@@ -2261,8 +2382,9 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
}
}
- // Same hack for "in[bwl]? (%dx), %al" -> "inb %dx, %al".
- if ((Name == "inb" || Name == "inw" || Name == "inl" || Name == "in") &&
+ // Same hack for "in[s]?[bwl]? (%dx), %al" -> "inb %dx, %al".
+ if ((Name == "inb" || Name == "insb" || Name == "inw" || Name == "insw" ||
+ Name == "inl" || Name == "insl" || Name == "in" || Name == "ins") &&
Operands.size() == 3) {
X86Operand &Op = (X86Operand &)*Operands[1];
if (Op.isMem() && Op.Mem.SegReg == 0 &&
@@ -2274,84 +2396,92 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
}
}
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 2> TmpOperands;
+ bool HadVerifyError = false;
+
// Append default arguments to "ins[bwld]"
- if (Name.startswith("ins") && Operands.size() == 1 &&
- (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd")) {
- AddDefaultSrcDestOperands(Operands,
+ if (Name.startswith("ins") &&
+ (Operands.size() == 1 || Operands.size() == 3) &&
+ (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd" ||
+ Name == "ins")) {
+
+ AddDefaultSrcDestOperands(TmpOperands,
X86Operand::CreateReg(X86::DX, NameLoc, NameLoc),
DefaultMemDIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Append default arguments to "outs[bwld]"
- if (Name.startswith("outs") && Operands.size() == 1 &&
+ if (Name.startswith("outs") &&
+ (Operands.size() == 1 || Operands.size() == 3) &&
(Name == "outsb" || Name == "outsw" || Name == "outsl" ||
- Name == "outsd" )) {
- AddDefaultSrcDestOperands(Operands,
- DefaultMemSIOperand(NameLoc),
+ Name == "outsd" || Name == "outs")) {
+ AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate
// values of $SIREG according to the mode. It would be nice if this
// could be achieved with InstAlias in the tables.
- if (Name.startswith("lods") && Operands.size() == 1 &&
+ if (Name.startswith("lods") &&
+ (Operands.size() == 1 || Operands.size() == 2) &&
(Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
- Name == "lodsl" || Name == "lodsd" || Name == "lodsq"))
- Operands.push_back(DefaultMemSIOperand(NameLoc));
+ Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
+ TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
+ }
// Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate
// values of $DIREG according to the mode. It would be nice if this
// could be achieved with InstAlias in the tables.
- if (Name.startswith("stos") && Operands.size() == 1 &&
+ if (Name.startswith("stos") &&
+ (Operands.size() == 1 || Operands.size() == 2) &&
(Name == "stos" || Name == "stosb" || Name == "stosw" ||
- Name == "stosl" || Name == "stosd" || Name == "stosq"))
- Operands.push_back(DefaultMemDIOperand(NameLoc));
+ Name == "stosl" || Name == "stosd" || Name == "stosq")) {
+ TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
+ }
// Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate
// values of $DIREG according to the mode. It would be nice if this
// could be achieved with InstAlias in the tables.
- if (Name.startswith("scas") && Operands.size() == 1 &&
+ if (Name.startswith("scas") &&
+ (Operands.size() == 1 || Operands.size() == 2) &&
(Name == "scas" || Name == "scasb" || Name == "scasw" ||
- Name == "scasl" || Name == "scasd" || Name == "scasq"))
- Operands.push_back(DefaultMemDIOperand(NameLoc));
+ Name == "scasl" || Name == "scasd" || Name == "scasq")) {
+ TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
+ }
// Add default SI and DI operands to "cmps[bwlq]".
if (Name.startswith("cmps") &&
+ (Operands.size() == 1 || Operands.size() == 3) &&
(Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
- if (Operands.size() == 1) {
- AddDefaultSrcDestOperands(Operands,
- DefaultMemDIOperand(NameLoc),
- DefaultMemSIOperand(NameLoc));
- } else if (Operands.size() == 3) {
- X86Operand &Op = (X86Operand &)*Operands[1];
- X86Operand &Op2 = (X86Operand &)*Operands[2];
- if (!doSrcDstMatch(Op, Op2))
- return Error(Op.getStartLoc(),
- "mismatching source and destination index registers");
- }
+ AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
+ DefaultMemSIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Add default SI and DI operands to "movs[bwlq]".
- if ((Name.startswith("movs") &&
- (Name == "movs" || Name == "movsb" || Name == "movsw" ||
- Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
- (Name.startswith("smov") &&
- (Name == "smov" || Name == "smovb" || Name == "smovw" ||
- Name == "smovl" || Name == "smovd" || Name == "smovq"))) {
- if (Operands.size() == 1) {
- if (Name == "movsd")
- Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
- AddDefaultSrcDestOperands(Operands,
- DefaultMemSIOperand(NameLoc),
- DefaultMemDIOperand(NameLoc));
- } else if (Operands.size() == 3) {
- X86Operand &Op = (X86Operand &)*Operands[1];
- X86Operand &Op2 = (X86Operand &)*Operands[2];
- if (!doSrcDstMatch(Op, Op2))
- return Error(Op.getStartLoc(),
- "mismatching source and destination index registers");
- }
+ if (((Name.startswith("movs") &&
+ (Name == "movs" || Name == "movsb" || Name == "movsw" ||
+ Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
+ (Name.startswith("smov") &&
+ (Name == "smov" || Name == "smovb" || Name == "smovw" ||
+ Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
+ (Operands.size() == 1 || Operands.size() == 3)) {
+ if (Name == "movsd" && Operands.size() == 1)
+ Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
+ AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
+ DefaultMemDIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
+ }
+
+  // Check if we encountered an error for one of the string instructions
+ if (HadVerifyError) {
+ return HadVerifyError;
}
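[Note on the hunk above] Together with the widened out/in hack earlier in this function (which now also strips the bogus memory form from the outs/ins spellings), every string instruction takes one of two shapes: no explicit operands, in which case the defaults are synthesized, or the fully explicit form, which is checked and normalized by VerifyAndAdjustOperands. A few AT&T-syntax cases, assuming 32-bit mode (sketch):

    asm("lodsl");                // default (%esi) source operand is appended
    asm("stosw (%edi)");         // explicit base matches the default: accepted
    asm("scasb (%eax)");         // warns; ES:(R|E)DI is used for the location
    asm("outsb (%esi), (%dx)");  // (%dx) is rewritten to the %dx register form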
// FIXME: Hack to handle recognize s{hr,ar,hl} $1, <op>. Canonicalize to
@@ -2387,64 +2517,22 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
}
}
+ // Transforms "xlat mem8" into "xlatb"
+ if ((Name == "xlat" || Name == "xlatb") && Operands.size() == 2) {
+ X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
+ if (Op1.isMem8()) {
+ Warning(Op1.getStartLoc(), "memory operand is only for determining the "
+ "size, (R|E)BX will be used for the location");
+ Operands.pop_back();
+ static_cast<X86Operand &>(*Operands[0]).setTokenValue("xlatb");
+ }
+ }
+
return false;
}
bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
- switch (Inst.getOpcode()) {
- default: return false;
- case X86::VMOVZPQILo2PQIrr:
- case X86::VMOVAPDrr:
- case X86::VMOVAPDYrr:
- case X86::VMOVAPSrr:
- case X86::VMOVAPSYrr:
- case X86::VMOVDQArr:
- case X86::VMOVDQAYrr:
- case X86::VMOVDQUrr:
- case X86::VMOVDQUYrr:
- case X86::VMOVUPDrr:
- case X86::VMOVUPDYrr:
- case X86::VMOVUPSrr:
- case X86::VMOVUPSYrr: {
- if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) ||
- !X86II::isX86_64ExtendedReg(Inst.getOperand(1).getReg()))
- return false;
-
- unsigned NewOpc;
- switch (Inst.getOpcode()) {
- default: llvm_unreachable("Invalid opcode");
- case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
- case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
- case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
- case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
- case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
- case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
- case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
- case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
- case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
- case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
- case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
- case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
- case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
- }
- Inst.setOpcode(NewOpc);
- return true;
- }
- case X86::VMOVSDrr:
- case X86::VMOVSSrr: {
- if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) ||
- !X86II::isX86_64ExtendedReg(Inst.getOperand(2).getReg()))
- return false;
- unsigned NewOpc;
- switch (Inst.getOpcode()) {
- default: llvm_unreachable("Invalid opcode");
- case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
- case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
- }
- Inst.setOpcode(NewOpc);
- return true;
- }
- }
+ return false;
}
static const char *getSubtargetFeatureName(uint64_t Val);
diff --git a/lib/Target/X86/AsmParser/X86AsmParserCommon.h b/lib/Target/X86/AsmParser/X86AsmParserCommon.h
index 54538c804a03b..c45a3f14ef116 100644
--- a/lib/Target/X86/AsmParser/X86AsmParserCommon.h
+++ b/lib/Target/X86/AsmParser/X86AsmParserCommon.h
@@ -10,6 +10,8 @@
#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86ASMPARSERCOMMON_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86ASMPARSERCOMMON_H
+#include "llvm/Support/MathExtras.h"
+
namespace llvm {
inline bool isImmSExti16i8Value(uint64_t Value) {
diff --git a/lib/Target/X86/AsmParser/X86Operand.h b/lib/Target/X86/AsmParser/X86Operand.h
index 7ec02408ffa41..a04c2f5c84a53 100644
--- a/lib/Target/X86/AsmParser/X86Operand.h
+++ b/lib/Target/X86/AsmParser/X86Operand.h
@@ -233,46 +233,47 @@ struct X86Operand : public MCParsedAsmOperand {
bool isMem512() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 512);
}
+ bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
+ }
- bool isMemVX32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
+ bool isMem64_RC128() const {
+ return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
+ }
+ bool isMem128_RC128() const {
+ return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);
}
- bool isMemVX32X() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM31;
+ bool isMem128_RC256() const {
+ return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM15);
}
- bool isMemVY32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
+ bool isMem256_RC128() const {
+ return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);
}
- bool isMemVY32X() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM31;
+ bool isMem256_RC256() const {
+ return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM15);
+ }
+
+ bool isMem64_RC128X() const {
+ return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
}
- bool isMemVX64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
+ bool isMem128_RC128X() const {
+ return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
}
- bool isMemVX64X() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM31;
+ bool isMem128_RC256X() const {
+ return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
}
- bool isMemVY64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
+ bool isMem256_RC128X() const {
+ return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
}
- bool isMemVY64X() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM31;
+ bool isMem256_RC256X() const {
+ return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
}
- bool isMemVZ32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
+ bool isMem512_RC256X() const {
+ return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
}
- bool isMemVZ64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
+ bool isMem512_RC512() const {
+ return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
}
bool isAbsMem() const {
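[Note on the hunk above] The renamed predicates encode both sizes directly in the name: isMem<PtrSize>_RC<IndexWidth>() matches a memory operand of <PtrSize> bits whose vector index register comes from the <IndexWidth>-bit class (XMM/YMM/ZMM), and a trailing X admits the EVEX-only registers 16-31. A hypothetical predicate built on the scheme (sketch):

    // 256-bit gather/scatter memory operand indexed by any of XMM0-XMM31:
    static bool isEvexGatherMem(const X86Operand &Op) {
      return Op.isMem256_RC128X();
    }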
diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt
index 55949155da9ea..894090f789777 100644
--- a/lib/Target/X86/CMakeLists.txt
+++ b/lib/Target/X86/CMakeLists.txt
@@ -17,6 +17,9 @@ set(sources
X86CallFrameOptimization.cpp
X86ExpandPseudo.cpp
X86FastISel.cpp
+ X86FixupBWInsts.cpp
+ X86FixupLEAs.cpp
+ X86FixupSetCC.cpp
X86FloatingPoint.cpp
X86FrameLowering.cpp
X86ISelDAGToDAG.cpp
@@ -24,6 +27,7 @@ set(sources
X86InstrInfo.cpp
X86MCInstLower.cpp
X86MachineFunctionInfo.cpp
+ X86OptimizeLEAs.cpp
X86PadShortFunction.cpp
X86RegisterInfo.cpp
X86SelectionDAGInfo.cpp
@@ -33,9 +37,8 @@ set(sources
X86TargetObjectFile.cpp
X86TargetTransformInfo.cpp
X86VZeroUpper.cpp
- X86FixupLEAs.cpp
+ X86WinAllocaExpander.cpp
X86WinEHState.cpp
- X86OptimizeLEAs.cpp
)
add_llvm_target(X86CodeGen ${sources})
diff --git a/lib/Target/X86/Disassembler/Makefile b/lib/Target/X86/Disassembler/Makefile
deleted file mode 100644
index 51e7b828cf2a4..0000000000000
--- a/lib/Target/X86/Disassembler/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- lib/Target/X86/Disassembler/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86Disassembler
-
-# Hack: we need to include 'main' x86 target directory to grab private headers.
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
-
-.PHONY: $(PROJ_SRC_DIR)/X86DisassemblerDecoder.c
diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp
index ce8fcf1646682..008dead5d0a5c 100644
--- a/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -10,14 +10,74 @@
// This file is part of the X86 Disassembler.
// It contains code to translate the data produced by the decoder into
// MCInsts.
-// Documentation for the disassembler can be found in X86Disassembler.h.
+//
+//
+// The X86 disassembler is a table-driven disassembler for the 16-, 32-, and
+// 64-bit X86 instruction sets. The main decode sequence for an assembly
+// instruction in this disassembler is:
+//
+// 1. Read the prefix bytes and determine the attributes of the instruction.
+// These attributes, recorded in enum attributeBits
+// (X86DisassemblerDecoderCommon.h), form a bitmask. The table CONTEXTS_SYM
+// provides a mapping from bitmasks to contexts, which are represented by
+// enum InstructionContext (ibid.).
+//
+// 2. Read the opcode, and determine what kind of opcode it is. The
+// disassembler distinguishes four kinds of opcodes, which are enumerated in
+// OpcodeType (X86DisassemblerDecoderCommon.h): one-byte (0xnn), two-byte
+// (0x0f 0xnn), three-byte-38 (0x0f 0x38 0xnn), or three-byte-3a
+// (0x0f 0x3a 0xnn). Mandatory prefixes are treated as part of the context.
+//
+// 3. Depending on the opcode type, look in one of four ClassDecision structures
+// (X86DisassemblerDecoderCommon.h). Use the opcode class to determine which
+//    OpcodeDecision (ibid.) to look the opcode up in. Look up the opcode to get
+// a ModRMDecision (ibid.).
+//
+// 4. Some instructions, such as escape opcodes or extended opcodes, or even
+// instructions that have ModRM*Reg / ModRM*Mem forms in LLVM, need the
+// ModR/M byte to complete decode. The ModRMDecision's type is an entry from
+// ModRMDecisionType (X86DisassemblerDecoderCommon.h) that indicates if the
+// ModR/M byte is required and how to interpret it.
+//
+// 5. After resolving the ModRMDecision, the disassembler has a unique ID
+// of type InstrUID (X86DisassemblerDecoderCommon.h). Looking this ID up in
+// INSTRUCTIONS_SYM yields the name of the instruction and the encodings and
+// meanings of its operands.
+//
+// 6. For each operand, its encoding is an entry from OperandEncoding
+// (X86DisassemblerDecoderCommon.h) and its type is an entry from
+// OperandType (ibid.). The encoding indicates how to read it from the
+// instruction; the type indicates how to interpret the value once it has
+// been read. For example, a register operand could be stored in the R/M
+// field of the ModR/M byte, the REG field of the ModR/M byte, or added to
+//    the main opcode. This is orthogonal to its meaning (a GPR or an XMM
+// register, for instance). Given this information, the operands can be
+// extracted and interpreted.
+//
+// 7. As the last step, the disassembler translates the instruction information
+// and operands into a format understandable by the client - in this case, an
+// MCInst for use by the MC infrastructure.
+//
+// The disassembler is broken broadly into two parts: the table emitter that
+// emits the instruction decode tables discussed above during compilation, and
+// the disassembler itself. The table emitter is documented in more detail in
+// utils/TableGen/X86DisassemblerEmitter.h.
+//
+// X86Disassembler.cpp contains the code responsible for step 7, and for
+// invoking the decoder to execute steps 1-6.
+// X86DisassemblerDecoderCommon.h contains the definitions needed by both the
+// table emitter and the disassembler.
+// X86DisassemblerDecoder.h contains the public interface of the decoder,
+// factored out into C for possible use by other projects.
+// X86DisassemblerDecoder.c contains the source code of the decoder, which is
+// responsible for steps 1-6.
//
//===----------------------------------------------------------------------===//
-#include "X86Disassembler.h"
#include "X86DisassemblerDecoder.h"
+#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
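[Note on the hunk above] The decode sequence documented in the relocated comment maps onto the decoder's entry points roughly as follows (a condensed sketch; the real control flow and error handling live in decodeInstruction() in X86DisassemblerDecoder.cpp, and miiArg is the MCInstrInfo handle it threads through):

    // Steps 1-6, simplified:
    if (readPrefixes(insn) ||     // 1: prefix bytes -> attribute bitmask
        readOpcode(insn)   ||     // 2: OpcodeType and opcode byte
        getID(insn, miiArg) ||    // 3-5: context + opcode (+ ModR/M) -> InstrUID
        readOperands(insn))       // 6: read each operand per its encoding
      return -1;
    // Step 7 is done in this file by translateInstruction(mcInst, insn, Dis).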
@@ -31,13 +91,6 @@ using namespace llvm::X86Disassembler;
#define DEBUG_TYPE "x86-disassembler"
-#define GET_REGINFO_ENUM
-#include "X86GenRegisterInfo.inc"
-#define GET_INSTRINFO_ENUM
-#include "X86GenInstrInfo.inc"
-#define GET_SUBTARGETINFO_ENUM
-#include "X86GenSubtargetInfo.inc"
-
void llvm::X86Disassembler::Debug(const char *file, unsigned line,
const char *s) {
dbgs() << file << ":" << line << ": " << s;
@@ -67,14 +120,34 @@ namespace X86 {
};
}
-extern Target TheX86_32Target, TheX86_64Target;
-
}
static bool translateInstruction(MCInst &target,
InternalInstruction &source,
const MCDisassembler *Dis);
+namespace {
+
+/// Generic disassembler for all X86 platforms. All that each platform class
+/// should have to do is subclass the constructor and provide a different
+/// disassemblerMode value.
+class X86GenericDisassembler : public MCDisassembler {
+ std::unique_ptr<const MCInstrInfo> MII;
+public:
+ X86GenericDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
+ std::unique_ptr<const MCInstrInfo> MII);
+public:
+ DecodeStatus getInstruction(MCInst &instr, uint64_t &size,
+ ArrayRef<uint8_t> Bytes, uint64_t Address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const override;
+
+private:
+ DisassemblerMode fMode;
+};
+
+}
+
X86GenericDisassembler::X86GenericDisassembler(
const MCSubtargetInfo &STI,
MCContext &Ctx,
@@ -826,7 +899,6 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
case TYPE_R64:
case TYPE_Rv:
case TYPE_MM64:
- case TYPE_XMM:
case TYPE_XMM32:
case TYPE_XMM64:
case TYPE_XMM128:
@@ -911,14 +983,6 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
return translateMaskRegister(mcInst, insn.writemask);
CASE_ENCODING_RM:
return translateRM(mcInst, operand, insn, Dis);
- case ENCODING_CB:
- case ENCODING_CW:
- case ENCODING_CD:
- case ENCODING_CP:
- case ENCODING_CO:
- case ENCODING_CT:
- debug("Translation of code offsets isn't supported.");
- return true;
case ENCODING_IB:
case ENCODING_IW:
case ENCODING_ID:
@@ -997,7 +1061,7 @@ static MCDisassembler *createX86Disassembler(const Target &T,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
std::unique_ptr<const MCInstrInfo> MII(T.createMCInstrInfo());
- return new X86Disassembler::X86GenericDisassembler(STI, Ctx, std::move(MII));
+ return new X86GenericDisassembler(STI, Ctx, std::move(MII));
}
extern "C" void LLVMInitializeX86Disassembler() {
diff --git a/lib/Target/X86/Disassembler/X86Disassembler.h b/lib/Target/X86/Disassembler/X86Disassembler.h
deleted file mode 100644
index d7f426b2641d1..0000000000000
--- a/lib/Target/X86/Disassembler/X86Disassembler.h
+++ /dev/null
@@ -1,112 +0,0 @@
-//===-- X86Disassembler.h - Disassembler for x86 and x86_64 -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// The X86 disassembler is a table-driven disassembler for the 16-, 32-, and
-// 64-bit X86 instruction sets. The main decode sequence for an assembly
-// instruction in this disassembler is:
-//
-// 1. Read the prefix bytes and determine the attributes of the instruction.
-// These attributes, recorded in enum attributeBits
-// (X86DisassemblerDecoderCommon.h), form a bitmask. The table CONTEXTS_SYM
-// provides a mapping from bitmasks to contexts, which are represented by
-// enum InstructionContext (ibid.).
-//
-// 2. Read the opcode, and determine what kind of opcode it is. The
-// disassembler distinguishes four kinds of opcodes, which are enumerated in
-// OpcodeType (X86DisassemblerDecoderCommon.h): one-byte (0xnn), two-byte
-// (0x0f 0xnn), three-byte-38 (0x0f 0x38 0xnn), or three-byte-3a
-// (0x0f 0x3a 0xnn). Mandatory prefixes are treated as part of the context.
-//
-// 3. Depending on the opcode type, look in one of four ClassDecision structures
-// (X86DisassemblerDecoderCommon.h). Use the opcode class to determine which
-// OpcodeDecision (ibid.) to look the opcode in. Look up the opcode, to get
-// a ModRMDecision (ibid.).
-//
-// 4. Some instructions, such as escape opcodes or extended opcodes, or even
-// instructions that have ModRM*Reg / ModRM*Mem forms in LLVM, need the
-// ModR/M byte to complete decode. The ModRMDecision's type is an entry from
-// ModRMDecisionType (X86DisassemblerDecoderCommon.h) that indicates if the
-// ModR/M byte is required and how to interpret it.
-//
-// 5. After resolving the ModRMDecision, the disassembler has a unique ID
-// of type InstrUID (X86DisassemblerDecoderCommon.h). Looking this ID up in
-// INSTRUCTIONS_SYM yields the name of the instruction and the encodings and
-// meanings of its operands.
-//
-// 6. For each operand, its encoding is an entry from OperandEncoding
-// (X86DisassemblerDecoderCommon.h) and its type is an entry from
-// OperandType (ibid.). The encoding indicates how to read it from the
-// instruction; the type indicates how to interpret the value once it has
-// been read. For example, a register operand could be stored in the R/M
-// field of the ModR/M byte, the REG field of the ModR/M byte, or added to
-// the main opcode. This is orthogonal from its meaning (an GPR or an XMM
-// register, for instance). Given this information, the operands can be
-// extracted and interpreted.
-//
-// 7. As the last step, the disassembler translates the instruction information
-// and operands into a format understandable by the client - in this case, an
-// MCInst for use by the MC infrastructure.
-//
-// The disassembler is broken broadly into two parts: the table emitter that
-// emits the instruction decode tables discussed above during compilation, and
-// the disassembler itself. The table emitter is documented in more detail in
-// utils/TableGen/X86DisassemblerEmitter.h.
-//
-// X86Disassembler.h contains the public interface for the disassembler,
-// adhering to the MCDisassembler interface.
-// X86Disassembler.cpp contains the code responsible for step 7, and for
-// invoking the decoder to execute steps 1-6.
-// X86DisassemblerDecoderCommon.h contains the definitions needed by both the
-// table emitter and the disassembler.
-// X86DisassemblerDecoder.h contains the public interface of the decoder,
-// factored out into C for possible use by other projects.
-// X86DisassemblerDecoder.c contains the source code of the decoder, which is
-// responsible for steps 1-6.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_X86_DISASSEMBLER_X86DISASSEMBLER_H
-#define LLVM_LIB_TARGET_X86_DISASSEMBLER_X86DISASSEMBLER_H
-
-#include "X86DisassemblerDecoderCommon.h"
-#include "llvm/MC/MCDisassembler.h"
-
-namespace llvm {
-
-class MCInst;
-class MCInstrInfo;
-class MCSubtargetInfo;
-class MemoryObject;
-class raw_ostream;
-
-namespace X86Disassembler {
-
-/// Generic disassembler for all X86 platforms. All each platform class should
-/// have to do is subclass the constructor, and provide a different
-/// disassemblerMode value.
-class X86GenericDisassembler : public MCDisassembler {
- std::unique_ptr<const MCInstrInfo> MII;
-public:
- X86GenericDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
- std::unique_ptr<const MCInstrInfo> MII);
-public:
- DecodeStatus getInstruction(MCInst &instr, uint64_t &size,
- ArrayRef<uint8_t> Bytes, uint64_t Address,
- raw_ostream &vStream,
- raw_ostream &cStream) const override;
-
-private:
- DisassemblerMode fMode;
-};
-
-} // namespace X86Disassembler
-
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
index 040143b155874..b0a150ab564d6 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
@@ -53,7 +53,6 @@ struct ContextDecision {
#define debug(s) do { } while (0)
#endif
-
/*
* contextForAttrs - Client for the instruction context table. Takes a set of
* attributes and returns the appropriate decode context.
@@ -276,8 +275,6 @@ static void dbgprintf(struct InternalInstruction* insn,
va_end(ap);
insn->dlog(insn->dlogArg, buffer);
-
- return;
}
/*
@@ -1453,10 +1450,10 @@ static int readModRM(struct InternalInstruction* insn) {
}
#define GENERIC_FIXUP_FUNC(name, base, prefix) \
- static uint8_t name(struct InternalInstruction *insn, \
- OperandType type, \
- uint8_t index, \
- uint8_t *valid) { \
+ static uint16_t name(struct InternalInstruction *insn, \
+ OperandType type, \
+ uint8_t index, \
+ uint8_t *valid) { \
*valid = 1; \
switch (type) { \
default: \
@@ -1485,7 +1482,6 @@ static int readModRM(struct InternalInstruction* insn) {
case TYPE_XMM128: \
case TYPE_XMM64: \
case TYPE_XMM32: \
- case TYPE_XMM: \
return prefix##_XMM0 + index; \
case TYPE_VK1: \
case TYPE_VK2: \
@@ -1507,6 +1503,10 @@ static int readModRM(struct InternalInstruction* insn) {
return prefix##_DR0 + index; \
case TYPE_CONTROLREG: \
return prefix##_CR0 + index; \
+ case TYPE_BNDR: \
+ if (index > 3) \
+ *valid = 0; \
+ return prefix##_BND0 + index; \
} \
}
@@ -1763,14 +1763,6 @@ static int readOperands(struct InternalInstruction* insn) {
if (Op.encoding != ENCODING_REG && insn->eaDisplacement == EA_DISP_8)
insn->displacement *= 1 << (Op.encoding - ENCODING_RM);
break;
- case ENCODING_CB:
- case ENCODING_CW:
- case ENCODING_CD:
- case ENCODING_CP:
- case ENCODING_CO:
- case ENCODING_CT:
- dbgprintf(insn, "We currently don't hande code-offset encodings");
- return -1;
case ENCODING_IB:
if (sawRegImm) {
/* Saw a register immediate so don't read again and instead split the
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index 28a628e5066b3..24d24a265b496 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -369,6 +369,12 @@ namespace X86Disassembler {
ENTRY(CR14) \
ENTRY(CR15)
+#define REGS_BOUND \
+ ENTRY(BND0) \
+ ENTRY(BND1) \
+ ENTRY(BND2) \
+ ENTRY(BND3)
+
#define ALL_EA_BASES \
EA_BASES_16BIT \
EA_BASES_32BIT \
@@ -391,6 +397,7 @@ namespace X86Disassembler {
REGS_SEGMENT \
REGS_DEBUG \
REGS_CONTROL \
+ REGS_BOUND \
ENTRY(RIP)
/// \brief All possible values of the base field for effective-address
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
index 301db72feafbb..0a835b876d905 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
@@ -352,12 +352,6 @@ enum ModRMDecisionType {
ENUM_ENTRY(ENCODING_RM_CD64,"R/M operand with CDisp scaling of 64") \
ENUM_ENTRY(ENCODING_VVVV, "Register operand in VEX.vvvv byte.") \
ENUM_ENTRY(ENCODING_WRITEMASK, "Register operand in EVEX.aaa byte.") \
- ENUM_ENTRY(ENCODING_CB, "1-byte code offset (possible new CS value)") \
- ENUM_ENTRY(ENCODING_CW, "2-byte") \
- ENUM_ENTRY(ENCODING_CD, "4-byte") \
- ENUM_ENTRY(ENCODING_CP, "6-byte") \
- ENUM_ENTRY(ENCODING_CO, "8-byte") \
- ENUM_ENTRY(ENCODING_CT, "10-byte") \
ENUM_ENTRY(ENCODING_IB, "1-byte immediate") \
ENUM_ENTRY(ENCODING_IW, "2-byte") \
ENUM_ENTRY(ENCODING_ID, "4-byte") \
@@ -436,14 +430,11 @@ enum OperandEncoding {
ENUM_ENTRY(TYPE_MOFFS16, "2-byte") \
ENUM_ENTRY(TYPE_MOFFS32, "4-byte") \
ENUM_ENTRY(TYPE_MOFFS64, "8-byte") \
- ENUM_ENTRY(TYPE_SREG, "Byte with single bit set: 0 = ES, 1 = CS, " \
- "2 = SS, 3 = DS, 4 = FS, 5 = GS") \
ENUM_ENTRY(TYPE_M32FP, "32-bit IEE754 memory floating-point operand") \
ENUM_ENTRY(TYPE_M64FP, "64-bit") \
ENUM_ENTRY(TYPE_M80FP, "80-bit extended") \
ENUM_ENTRY(TYPE_ST, "Position on the floating-point stack") \
ENUM_ENTRY(TYPE_MM64, "8-byte MMX register") \
- ENUM_ENTRY(TYPE_XMM, "XMM register operand") \
ENUM_ENTRY(TYPE_XMM32, "4-byte XMM register or memory operand") \
ENUM_ENTRY(TYPE_XMM64, "8-byte") \
ENUM_ENTRY(TYPE_XMM128, "16-byte") \
@@ -456,7 +447,6 @@ enum OperandEncoding {
ENUM_ENTRY(TYPE_VK16, "16-bit") \
ENUM_ENTRY(TYPE_VK32, "32-bit") \
ENUM_ENTRY(TYPE_VK64, "64-bit") \
- ENUM_ENTRY(TYPE_XMM0, "Implicit use of XMM0") \
ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand") \
ENUM_ENTRY(TYPE_DEBUGREG, "Debug register operand") \
ENUM_ENTRY(TYPE_CONTROLREG, "Control register operand") \
diff --git a/lib/Target/X86/InstPrinter/Makefile b/lib/Target/X86/InstPrinter/Makefile
deleted file mode 100644
index c82aa330a20cc..0000000000000
--- a/lib/Target/X86/InstPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/X86/AsmPrinter/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86AsmPrinter
-
-# Hack: we need to include 'main' x86 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index b4c0bc4cd4d9a..3a5d056888a1c 100644
--- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
+++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -25,7 +25,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
-#include <map>
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
@@ -166,17 +165,25 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
if (Op.isReg()) {
printRegName(O, Op.getReg());
} else if (Op.isImm()) {
- // Print X86 immediates as signed values.
- O << markup("<imm:") << '$' << formatImm((int64_t)Op.getImm())
- << markup(">");
+ // Print immediates as signed values.
+ int64_t Imm = Op.getImm();
+ O << markup("<imm:") << '$' << formatImm(Imm) << markup(">");
+
+ // TODO: This should be in a helper function in the base class, so it can
+ // be used by other printers.
// If there are no instruction-specific comments, add a comment clarifying
// the hex value of the immediate operand when it isn't in the range
// [-256,255].
- if (CommentStream && !HasCustomInstComment &&
- (Op.getImm() > 255 || Op.getImm() < -256))
- *CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Op.getImm());
-
+ if (CommentStream && !HasCustomInstComment && (Imm > 255 || Imm < -256)) {
+ // Don't print unnecessary hex sign bits.
+ if (Imm == (int16_t)(Imm))
+ *CommentStream << format("imm = 0x%" PRIX16 "\n", (uint16_t)Imm);
+ else if (Imm == (int32_t)(Imm))
+ *CommentStream << format("imm = 0x%" PRIX32 "\n", (uint32_t)Imm);
+ else
+ *CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Imm);
+ }
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
O << markup("<imm:") << '$';
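[Note on the hunk above] The sign-bit narrowing can be exercised standalone (plain C++, mirroring the checks above rather than the printer itself):

    #include <cinttypes>
    #include <cstdio>

    int main() {
      int64_t Imm = -2;
      if (Imm == (int16_t)Imm)        // value survives the int16_t round-trip
        printf("imm = 0x%" PRIX16 "\n", (uint16_t)Imm);  // imm = 0xFFFE
      else if (Imm == (int32_t)Imm)
        printf("imm = 0x%" PRIX32 "\n", (uint32_t)Imm);
      else
        printf("imm = 0x%" PRIX64 "\n", (uint64_t)Imm);
    }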
diff --git a/lib/Target/X86/InstPrinter/X86InstComments.cpp b/lib/Target/X86/InstPrinter/X86InstComments.cpp
index 73f654cba38c9..f5379566b619f 100644
--- a/lib/Target/X86/InstPrinter/X86InstComments.cpp
+++ b/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -21,6 +21,143 @@
using namespace llvm;
+#define CASE_SSE_INS_COMMON(Inst, src) \
+ case X86::Inst##src:
+
+#define CASE_AVX_INS_COMMON(Inst, Suffix, src) \
+ case X86::V##Inst##Suffix##src:
+
+#define CASE_MASK_INS_COMMON(Inst, Suffix, src) \
+ case X86::V##Inst##Suffix##src##k:
+
+#define CASE_MASKZ_INS_COMMON(Inst, Suffix, src) \
+ case X86::V##Inst##Suffix##src##kz:
+
+#define CASE_AVX512_INS_COMMON(Inst, Suffix, src) \
+ CASE_AVX_INS_COMMON(Inst, Suffix, src) \
+ CASE_MASK_INS_COMMON(Inst, Suffix, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Suffix, src)
+
+#define CASE_MOVDUP(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, r##src) \
+ CASE_AVX_INS_COMMON(Inst, , r##src) \
+ CASE_AVX_INS_COMMON(Inst, Y, r##src) \
+ CASE_SSE_INS_COMMON(Inst, r##src)
+
+#define CASE_MASK_MOVDUP(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_MASKZ_MOVDUP(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_PMOVZX(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, r##src) \
+ CASE_AVX_INS_COMMON(Inst, , r##src) \
+ CASE_AVX_INS_COMMON(Inst, Y, r##src) \
+ CASE_SSE_INS_COMMON(Inst, r##src)
+
+#define CASE_MASK_PMOVZX(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_MASKZ_PMOVZX(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_UNPCK(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, r##src) \
+ CASE_AVX_INS_COMMON(Inst, , r##src) \
+ CASE_AVX_INS_COMMON(Inst, Y, r##src) \
+ CASE_SSE_INS_COMMON(Inst, r##src)
+
+#define CASE_MASK_UNPCK(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_MASKZ_UNPCK(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_SHUF(Inst, suf) \
+ CASE_AVX512_INS_COMMON(Inst, Z, suf) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, suf) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, suf) \
+ CASE_AVX_INS_COMMON(Inst, , suf) \
+ CASE_AVX_INS_COMMON(Inst, Y, suf) \
+ CASE_SSE_INS_COMMON(Inst, suf)
+
+#define CASE_MASK_SHUF(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, r##src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z256, r##src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z128, r##src##i)
+
+#define CASE_MASKZ_SHUF(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, r##src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, r##src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, r##src##i)
+
+#define CASE_VPERMILPI(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, src##i) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, src##i) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, src##i) \
+ CASE_AVX_INS_COMMON(Inst, , src##i) \
+ CASE_AVX_INS_COMMON(Inst, Y, src##i)
+
+#define CASE_MASK_VPERMILPI(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z256, src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z128, src##i)
+
+#define CASE_MASKZ_VPERMILPI(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, src##i)
+
+#define CASE_VPERM(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, src##i) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, src##i) \
+ CASE_AVX_INS_COMMON(Inst, Y, src##i)
+
+#define CASE_MASK_VPERM(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z256, src##i)
+
+#define CASE_MASKZ_VPERM(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, src##i)
+
+#define CASE_VSHUF(Inst, src) \
+ CASE_AVX512_INS_COMMON(SHUFF##Inst, Z, r##src##i) \
+ CASE_AVX512_INS_COMMON(SHUFI##Inst, Z, r##src##i) \
+ CASE_AVX512_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
+ CASE_AVX512_INS_COMMON(SHUFI##Inst, Z256, r##src##i)
+
+#define CASE_MASK_VSHUF(Inst, src) \
+ CASE_MASK_INS_COMMON(SHUFF##Inst, Z, r##src##i) \
+ CASE_MASK_INS_COMMON(SHUFI##Inst, Z, r##src##i) \
+ CASE_MASK_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
+ CASE_MASK_INS_COMMON(SHUFI##Inst, Z256, r##src##i)
+
+#define CASE_MASKZ_VSHUF(Inst, src) \
+ CASE_MASKZ_INS_COMMON(SHUFF##Inst, Z, r##src##i) \
+ CASE_MASKZ_INS_COMMON(SHUFI##Inst, Z, r##src##i) \
+ CASE_MASKZ_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
+ CASE_MASKZ_INS_COMMON(SHUFI##Inst, Z256, r##src##i)
+
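For orientation, here is roughly what one of these helpers expands to. The macros only paste tokens, so the exact opcode names come from the .td definitions; assuming the usual naming, CASE_MOVDUP(MOVSLDUP, r) produces the following case labels, covering SSE, AVX, AVX2 and all three AVX-512 vector lengths, the latter in plain, merge-masked (k) and zero-masked (kz) forms:

    case X86::VMOVSLDUPZrr:    case X86::VMOVSLDUPZrrk:    case X86::VMOVSLDUPZrrkz:
    case X86::VMOVSLDUPZ256rr: case X86::VMOVSLDUPZ256rrk: case X86::VMOVSLDUPZ256rrkz:
    case X86::VMOVSLDUPZ128rr: case X86::VMOVSLDUPZ128rrk: case X86::VMOVSLDUPZ128rrkz:
    case X86::VMOVSLDUPrr:
    case X86::VMOVSLDUPYrr:
    case X86::MOVSLDUPrr: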
static unsigned getVectorRegSize(unsigned RegNo) {
if (X86::ZMM0 <= RegNo && RegNo <= X86::ZMM31)
return 512;
@@ -41,159 +178,184 @@ static MVT getRegOperandVectorVT(const MCInst *MI, const MVT &ScalarVT,
getVectorRegSize(OpReg)/ScalarVT.getSizeInBits());
}
-/// \brief Extracts the src/dst types for a given zero extension instruction.
-/// \note While the number of elements in DstVT type correct, the
-/// number in the SrcVT type is expanded to fill the src xmm register and the
-/// upper elements may not be included in the dst xmm/ymm register.
-static void getZeroExtensionTypes(const MCInst *MI, MVT &SrcVT, MVT &DstVT) {
+/// \brief Extracts the dst type for a given zero extension instruction.
+static MVT getZeroExtensionResultType(const MCInst *MI) {
switch (MI->getOpcode()) {
default:
llvm_unreachable("Unknown zero extension instruction");
- // i8 zero extension
- case X86::PMOVZXBWrm:
- case X86::PMOVZXBWrr:
- case X86::VPMOVZXBWrm:
- case X86::VPMOVZXBWrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v8i16;
- break;
- case X86::VPMOVZXBWYrm:
- case X86::VPMOVZXBWYrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v16i16;
- break;
- case X86::PMOVZXBDrm:
- case X86::PMOVZXBDrr:
- case X86::VPMOVZXBDrm:
- case X86::VPMOVZXBDrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v4i32;
- break;
- case X86::VPMOVZXBDYrm:
- case X86::VPMOVZXBDYrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v8i32;
- break;
- case X86::PMOVZXBQrm:
- case X86::PMOVZXBQrr:
- case X86::VPMOVZXBQrm:
- case X86::VPMOVZXBQrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v2i64;
- break;
- case X86::VPMOVZXBQYrm:
- case X86::VPMOVZXBQYrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v4i64;
- break;
- // i16 zero extension
- case X86::PMOVZXWDrm:
- case X86::PMOVZXWDrr:
- case X86::VPMOVZXWDrm:
- case X86::VPMOVZXWDrr:
- SrcVT = MVT::v8i16;
- DstVT = MVT::v4i32;
- break;
- case X86::VPMOVZXWDYrm:
- case X86::VPMOVZXWDYrr:
- SrcVT = MVT::v8i16;
- DstVT = MVT::v8i32;
- break;
- case X86::PMOVZXWQrm:
- case X86::PMOVZXWQrr:
- case X86::VPMOVZXWQrm:
- case X86::VPMOVZXWQrr:
- SrcVT = MVT::v8i16;
- DstVT = MVT::v2i64;
- break;
- case X86::VPMOVZXWQYrm:
- case X86::VPMOVZXWQYrr:
- SrcVT = MVT::v8i16;
- DstVT = MVT::v4i64;
- break;
- // i32 zero extension
- case X86::PMOVZXDQrm:
- case X86::PMOVZXDQrr:
- case X86::VPMOVZXDQrm:
- case X86::VPMOVZXDQrr:
- SrcVT = MVT::v4i32;
- DstVT = MVT::v2i64;
- break;
- case X86::VPMOVZXDQYrm:
- case X86::VPMOVZXDQYrr:
- SrcVT = MVT::v4i32;
- DstVT = MVT::v4i64;
- break;
+ // zero extension to i16
+ CASE_PMOVZX(PMOVZXBW, m)
+ CASE_PMOVZX(PMOVZXBW, r)
+ return getRegOperandVectorVT(MI, MVT::i16, 0);
+ // zero extension to i32
+ CASE_PMOVZX(PMOVZXBD, m)
+ CASE_PMOVZX(PMOVZXBD, r)
+ CASE_PMOVZX(PMOVZXWD, m)
+ CASE_PMOVZX(PMOVZXWD, r)
+ return getRegOperandVectorVT(MI, MVT::i32, 0);
+ // zero extension to i64
+ CASE_PMOVZX(PMOVZXBQ, m)
+ CASE_PMOVZX(PMOVZXBQ, r)
+ CASE_PMOVZX(PMOVZXWQ, m)
+ CASE_PMOVZX(PMOVZXWQ, r)
+ CASE_PMOVZX(PMOVZXDQ, m)
+ CASE_PMOVZX(PMOVZXDQ, r)
+ return getRegOperandVectorVT(MI, MVT::i64, 0);
}
}
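The destination type now follows purely from the width of the destination register, so one case covers the XMM, YMM and ZMM forms. A tiny standalone sketch of that arithmetic (plain C++, not the LLVM helpers):

    #include <cstdio>

    // Lane count follows from the destination register width: a zmm PMOVZX*D
    // result has 512 / 32 == 16 lanes (v16i32), an xmm result has 4 (v4i32).
    static unsigned numLanes(unsigned RegBits, unsigned ScalarBits) {
      return RegBits / ScalarBits;
    }

    int main() {
      std::printf("xmm -> v%ui32\n", numLanes(128, 32)); // v4i32
      std::printf("zmm -> v%ui32\n", numLanes(512, 32)); // v16i32
    }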
-#define CASE_MASK_INS_COMMON(Inst, Suffix, src) \
- case X86::V##Inst##Suffix##src: \
- case X86::V##Inst##Suffix##src##k: \
- case X86::V##Inst##Suffix##src##kz:
-
-#define CASE_SSE_INS_COMMON(Inst, src) \
- case X86::Inst##src:
-
-#define CASE_AVX_INS_COMMON(Inst, Suffix, src) \
- case X86::V##Inst##Suffix##src:
+/// Wraps the destination register name with AVX512 mask/maskz filtering.
+static std::string getMaskName(const MCInst *MI, const char *DestName,
+ const char *(*getRegName)(unsigned)) {
+ std::string OpMaskName(DestName);
-#define CASE_MOVDUP(Inst, src) \
- CASE_MASK_INS_COMMON(Inst, Z, r##src) \
- CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
- CASE_MASK_INS_COMMON(Inst, Z128, r##src) \
- CASE_AVX_INS_COMMON(Inst, , r##src) \
- CASE_AVX_INS_COMMON(Inst, Y, r##src) \
- CASE_SSE_INS_COMMON(Inst, r##src) \
-
-#define CASE_UNPCK(Inst, src) \
- CASE_MASK_INS_COMMON(Inst, Z, r##src) \
- CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
- CASE_MASK_INS_COMMON(Inst, Z128, r##src) \
- CASE_AVX_INS_COMMON(Inst, , r##src) \
- CASE_AVX_INS_COMMON(Inst, Y, r##src) \
- CASE_SSE_INS_COMMON(Inst, r##src) \
-
-#define CASE_SHUF(Inst, src) \
- CASE_MASK_INS_COMMON(Inst, Z, r##src##i) \
- CASE_MASK_INS_COMMON(Inst, Z256, r##src##i) \
- CASE_MASK_INS_COMMON(Inst, Z128, r##src##i) \
- CASE_AVX_INS_COMMON(Inst, , r##src##i) \
- CASE_AVX_INS_COMMON(Inst, Y, r##src##i) \
- CASE_SSE_INS_COMMON(Inst, r##src##i) \
-
-#define CASE_VPERM(Inst, src) \
- CASE_MASK_INS_COMMON(Inst, Z, src##i) \
- CASE_MASK_INS_COMMON(Inst, Z256, src##i) \
- CASE_MASK_INS_COMMON(Inst, Z128, src##i) \
- CASE_AVX_INS_COMMON(Inst, , src##i) \
- CASE_AVX_INS_COMMON(Inst, Y, src##i) \
+ bool MaskWithZero = false;
+ const char *MaskRegName = nullptr;
-#define CASE_VSHUF(Inst, src) \
- CASE_MASK_INS_COMMON(SHUFF##Inst, Z, r##src##i) \
- CASE_MASK_INS_COMMON(SHUFI##Inst, Z, r##src##i) \
- CASE_MASK_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
- CASE_MASK_INS_COMMON(SHUFI##Inst, Z256, r##src##i) \
-
-/// \brief Extracts the types and if it has memory operand for a given
-/// (SHUFF32x4/SHUFF64x2/SHUFI32x4/SHUFI64x2) instruction.
-static void getVSHUF64x2FamilyInfo(const MCInst *MI, MVT &VT, bool &HasMemOp) {
- HasMemOp = false;
switch (MI->getOpcode()) {
default:
- llvm_unreachable("Unknown VSHUF64x2 family instructions.");
+ return OpMaskName;
+ CASE_MASKZ_MOVDUP(MOVDDUP, m)
+ CASE_MASKZ_MOVDUP(MOVDDUP, r)
+ CASE_MASKZ_MOVDUP(MOVSHDUP, m)
+ CASE_MASKZ_MOVDUP(MOVSHDUP, r)
+ CASE_MASKZ_MOVDUP(MOVSLDUP, m)
+ CASE_MASKZ_MOVDUP(MOVSLDUP, r)
+ CASE_MASKZ_PMOVZX(PMOVZXBD, m)
+ CASE_MASKZ_PMOVZX(PMOVZXBD, r)
+ CASE_MASKZ_PMOVZX(PMOVZXBQ, m)
+ CASE_MASKZ_PMOVZX(PMOVZXBQ, r)
+ CASE_MASKZ_PMOVZX(PMOVZXBW, m)
+ CASE_MASKZ_PMOVZX(PMOVZXBW, r)
+ CASE_MASKZ_PMOVZX(PMOVZXDQ, m)
+ CASE_MASKZ_PMOVZX(PMOVZXDQ, r)
+ CASE_MASKZ_PMOVZX(PMOVZXWD, m)
+ CASE_MASKZ_PMOVZX(PMOVZXWD, r)
+ CASE_MASKZ_PMOVZX(PMOVZXWQ, m)
+ CASE_MASKZ_PMOVZX(PMOVZXWQ, r)
+ CASE_MASKZ_UNPCK(PUNPCKHBW, m)
+ CASE_MASKZ_UNPCK(PUNPCKHBW, r)
+ CASE_MASKZ_UNPCK(PUNPCKHWD, m)
+ CASE_MASKZ_UNPCK(PUNPCKHWD, r)
+ CASE_MASKZ_UNPCK(PUNPCKHDQ, m)
+ CASE_MASKZ_UNPCK(PUNPCKHDQ, r)
+ CASE_MASKZ_UNPCK(PUNPCKLBW, m)
+ CASE_MASKZ_UNPCK(PUNPCKLBW, r)
+ CASE_MASKZ_UNPCK(PUNPCKLWD, m)
+ CASE_MASKZ_UNPCK(PUNPCKLWD, r)
+ CASE_MASKZ_UNPCK(PUNPCKLDQ, m)
+ CASE_MASKZ_UNPCK(PUNPCKLDQ, r)
+ CASE_MASKZ_UNPCK(UNPCKHPD, m)
+ CASE_MASKZ_UNPCK(UNPCKHPD, r)
+ CASE_MASKZ_UNPCK(UNPCKHPS, m)
+ CASE_MASKZ_UNPCK(UNPCKHPS, r)
+ CASE_MASKZ_UNPCK(UNPCKLPD, m)
+ CASE_MASKZ_UNPCK(UNPCKLPD, r)
+ CASE_MASKZ_UNPCK(UNPCKLPS, m)
+ CASE_MASKZ_UNPCK(UNPCKLPS, r)
+ CASE_MASKZ_SHUF(PALIGNR, r)
+ CASE_MASKZ_SHUF(PALIGNR, m)
+ CASE_MASKZ_SHUF(SHUFPD, m)
+ CASE_MASKZ_SHUF(SHUFPD, r)
+ CASE_MASKZ_SHUF(SHUFPS, m)
+ CASE_MASKZ_SHUF(SHUFPS, r)
+ CASE_MASKZ_VPERMILPI(PERMILPD, m)
+ CASE_MASKZ_VPERMILPI(PERMILPD, r)
+ CASE_MASKZ_VPERMILPI(PERMILPS, m)
+ CASE_MASKZ_VPERMILPI(PERMILPS, r)
+ CASE_MASKZ_VPERMILPI(PSHUFD, m)
+ CASE_MASKZ_VPERMILPI(PSHUFD, r)
+ CASE_MASKZ_VPERMILPI(PSHUFHW, m)
+ CASE_MASKZ_VPERMILPI(PSHUFHW, r)
+ CASE_MASKZ_VPERMILPI(PSHUFLW, m)
+ CASE_MASKZ_VPERMILPI(PSHUFLW, r)
+ CASE_MASKZ_VPERM(PERMPD, m)
+ CASE_MASKZ_VPERM(PERMPD, r)
+ CASE_MASKZ_VPERM(PERMQ, m)
+ CASE_MASKZ_VPERM(PERMQ, r)
+ CASE_MASKZ_VSHUF(64X2, m)
+ CASE_MASKZ_VSHUF(64X2, r)
+ CASE_MASKZ_VSHUF(32X4, m)
+ CASE_MASKZ_VSHUF(32X4, r)
+ MaskWithZero = true;
+ MaskRegName = getRegName(MI->getOperand(1).getReg());
break;
- CASE_VSHUF(64X2, m)
- HasMemOp = true; // FALL THROUGH.
- CASE_VSHUF(64X2, r)
- VT = getRegOperandVectorVT(MI, MVT::i64, 0);
- break;
- CASE_VSHUF(32X4, m)
- HasMemOp = true; // FALL THROUGH.
- CASE_VSHUF(32X4, r)
- VT = getRegOperandVectorVT(MI, MVT::i32, 0);
+ CASE_MASK_MOVDUP(MOVDDUP, m)
+ CASE_MASK_MOVDUP(MOVDDUP, r)
+ CASE_MASK_MOVDUP(MOVSHDUP, m)
+ CASE_MASK_MOVDUP(MOVSHDUP, r)
+ CASE_MASK_MOVDUP(MOVSLDUP, m)
+ CASE_MASK_MOVDUP(MOVSLDUP, r)
+ CASE_MASK_PMOVZX(PMOVZXBD, m)
+ CASE_MASK_PMOVZX(PMOVZXBD, r)
+ CASE_MASK_PMOVZX(PMOVZXBQ, m)
+ CASE_MASK_PMOVZX(PMOVZXBQ, r)
+ CASE_MASK_PMOVZX(PMOVZXBW, m)
+ CASE_MASK_PMOVZX(PMOVZXBW, r)
+ CASE_MASK_PMOVZX(PMOVZXDQ, m)
+ CASE_MASK_PMOVZX(PMOVZXDQ, r)
+ CASE_MASK_PMOVZX(PMOVZXWD, m)
+ CASE_MASK_PMOVZX(PMOVZXWD, r)
+ CASE_MASK_PMOVZX(PMOVZXWQ, m)
+ CASE_MASK_PMOVZX(PMOVZXWQ, r)
+ CASE_MASK_UNPCK(PUNPCKHBW, m)
+ CASE_MASK_UNPCK(PUNPCKHBW, r)
+ CASE_MASK_UNPCK(PUNPCKHWD, m)
+ CASE_MASK_UNPCK(PUNPCKHWD, r)
+ CASE_MASK_UNPCK(PUNPCKHDQ, m)
+ CASE_MASK_UNPCK(PUNPCKHDQ, r)
+ CASE_MASK_UNPCK(PUNPCKLBW, m)
+ CASE_MASK_UNPCK(PUNPCKLBW, r)
+ CASE_MASK_UNPCK(PUNPCKLWD, m)
+ CASE_MASK_UNPCK(PUNPCKLWD, r)
+ CASE_MASK_UNPCK(PUNPCKLDQ, m)
+ CASE_MASK_UNPCK(PUNPCKLDQ, r)
+ CASE_MASK_UNPCK(UNPCKHPD, m)
+ CASE_MASK_UNPCK(UNPCKHPD, r)
+ CASE_MASK_UNPCK(UNPCKHPS, m)
+ CASE_MASK_UNPCK(UNPCKHPS, r)
+ CASE_MASK_UNPCK(UNPCKLPD, m)
+ CASE_MASK_UNPCK(UNPCKLPD, r)
+ CASE_MASK_UNPCK(UNPCKLPS, m)
+ CASE_MASK_UNPCK(UNPCKLPS, r)
+ CASE_MASK_SHUF(PALIGNR, r)
+ CASE_MASK_SHUF(PALIGNR, m)
+ CASE_MASK_SHUF(SHUFPD, m)
+ CASE_MASK_SHUF(SHUFPD, r)
+ CASE_MASK_SHUF(SHUFPS, m)
+ CASE_MASK_SHUF(SHUFPS, r)
+ CASE_MASK_VPERMILPI(PERMILPD, m)
+ CASE_MASK_VPERMILPI(PERMILPD, r)
+ CASE_MASK_VPERMILPI(PERMILPS, m)
+ CASE_MASK_VPERMILPI(PERMILPS, r)
+ CASE_MASK_VPERMILPI(PSHUFD, m)
+ CASE_MASK_VPERMILPI(PSHUFD, r)
+ CASE_MASK_VPERMILPI(PSHUFHW, m)
+ CASE_MASK_VPERMILPI(PSHUFHW, r)
+ CASE_MASK_VPERMILPI(PSHUFLW, m)
+ CASE_MASK_VPERMILPI(PSHUFLW, r)
+ CASE_MASK_VPERM(PERMPD, m)
+ CASE_MASK_VPERM(PERMPD, r)
+ CASE_MASK_VPERM(PERMQ, m)
+ CASE_MASK_VPERM(PERMQ, r)
+ CASE_MASK_VSHUF(64X2, m)
+ CASE_MASK_VSHUF(64X2, r)
+ CASE_MASK_VSHUF(32X4, m)
+ CASE_MASK_VSHUF(32X4, r)
+ MaskRegName = getRegName(MI->getOperand(2).getReg());
break;
}
+
+ // MASK: zmmX {%kY}
+ OpMaskName += " {%";
+ OpMaskName += MaskRegName;
+ OpMaskName += "}";
+
+ // MASKZ: zmmX {%kY} {z}
+ if (MaskWithZero)
+ OpMaskName += " {z}";
+
+ return OpMaskName;
}
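A standalone sketch, in plain C++, of the decoration this helper builds, matching the "{%kN}" and "{%kN} {z}" shapes noted in the comments above (illustrative only, not the MC API):

    #include <iostream>
    #include <string>

    static std::string decorateWithMask(std::string Dest, const std::string &MaskReg,
                                        bool ZeroMasking) {
      Dest += " {%" + MaskReg + "}"; // merge-masking: "zmm0 {%k1}"
      if (ZeroMasking)
        Dest += " {z}";              // zero-masking:  "zmm0 {%k1} {z}"
      return Dest;
    }

    int main() {
      std::cout << decorateWithMask("zmm0", "k1", false) << '\n'; // zmm0 {%k1}
      std::cout << decorateWithMask("zmm0", "k1", true) << '\n';  // zmm0 {%k1} {z}
    }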
//===----------------------------------------------------------------------===//
@@ -208,6 +370,8 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// If this is a shuffle operation, the switch should fill in this state.
SmallVector<int, 8> ShuffleMask;
const char *DestName = nullptr, *Src1Name = nullptr, *Src2Name = nullptr;
+ unsigned NumOperands = MI->getNumOperands();
+ bool RegForm = false;
switch (MI->getOpcode()) {
default:
@@ -222,9 +386,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::BLENDPDrmi:
case X86::VBLENDPDrmi:
case X86::VBLENDPDYrmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeBLENDMask(getRegOperandVectorVT(MI, MVT::f64, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
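The decode here follows the usual blend-immediate semantics: bit i of the immediate selects element i from the second source, which the shuffle mask numbers as NumElts + i. A standalone sketch of that mask under those assumptions (illustrative, not the DecodeBLENDMask helper itself):

    #include <cstdio>
    #include <vector>

    // Bit i of the immediate picks element i from the second source
    // (numbered NumElts + i in the mask) instead of the first source.
    static std::vector<int> blendMask(unsigned NumElts, unsigned Imm) {
      std::vector<int> Mask;
      for (unsigned i = 0; i != NumElts; ++i)
        Mask.push_back((Imm >> i) & 1 ? int(NumElts + i) : int(i));
      return Mask;
    }

    int main() {
      for (int M : blendMask(4, 0x6)) // e.g. a v4f32 blend with imm = 0b0110
        std::printf("%d ", M);        // prints: 0 5 6 3
      std::printf("\n");
    }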
@@ -238,9 +402,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::BLENDPSrmi:
case X86::VBLENDPSrmi:
case X86::VBLENDPSYrmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeBLENDMask(getRegOperandVectorVT(MI, MVT::f32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
@@ -254,9 +418,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PBLENDWrmi:
case X86::VPBLENDWrmi:
case X86::VPBLENDWYrmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeBLENDMask(getRegOperandVectorVT(MI, MVT::i16, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
@@ -268,9 +432,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// FALL THROUGH.
case X86::VPBLENDDrmi:
case X86::VPBLENDDYrmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeBLENDMask(getRegOperandVectorVT(MI, MVT::i32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
@@ -278,14 +442,16 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::INSERTPSrr:
case X86::VINSERTPSrr:
+ case X86::VINSERTPSzrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::INSERTPSrm:
case X86::VINSERTPSrm:
+ case X86::VINSERTPSzrm:
DestName = getRegName(MI->getOperand(0).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
- DecodeINSERTPSMask(MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ if (MI->getOperand(NumOperands - 1).isImm())
+ DecodeINSERTPSMask(MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
@@ -307,8 +473,40 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
DecodeMOVHLPSMask(2, ShuffleMask);
break;
+ case X86::MOVHPDrm:
+ case X86::VMOVHPDrm:
+ case X86::VMOVHPDZ128rm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeInsertElementMask(MVT::v2f64, 1, 1, ShuffleMask);
+ break;
+
+ case X86::MOVHPSrm:
+ case X86::VMOVHPSrm:
+ case X86::VMOVHPSZ128rm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeInsertElementMask(MVT::v4f32, 2, 2, ShuffleMask);
+ break;
+
+ case X86::MOVLPDrm:
+ case X86::VMOVLPDrm:
+ case X86::VMOVLPDZ128rm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeInsertElementMask(MVT::v2f64, 0, 1, ShuffleMask);
+ break;
+
+ case X86::MOVLPSrm:
+ case X86::VMOVLPSrm:
+ case X86::VMOVLPSZ128rm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeInsertElementMask(MVT::v4f32, 0, 2, ShuffleMask);
+ break;
+
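These MOVH/MOVL loads are modeled as inserting Len consecutive elements at position Idx from the memory operand into an otherwise pass-through shuffle. A standalone sketch of the mask assumed here (illustrative, not the DecodeInsertElementMask helper itself):

    #include <cstdio>
    #include <vector>

    // Elements [Idx, Idx+Len) come from the inserted (memory) source, whose
    // elements are numbered starting at NumElts; everything else passes through.
    static std::vector<int> insertElementMask(unsigned NumElts, unsigned Idx,
                                              unsigned Len) {
      std::vector<int> Mask;
      for (unsigned i = 0; i != NumElts; ++i)
        Mask.push_back(int(i));
      for (unsigned i = 0; i != Len; ++i)
        Mask[Idx + i] = int(NumElts + i);
      return Mask;
    }

    int main() {
      for (int M : insertElementMask(4, 2, 2)) // MOVHPS-style: high two lanes from memory
        std::printf("%d ", M);                 // prints: 0 1 4 5
      std::printf("\n");
    }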
CASE_MOVDUP(MOVSLDUP, r)
- Src1Name = getRegName(MI->getOperand(MI->getNumOperands() - 1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
// FALL THROUGH.
CASE_MOVDUP(MOVSLDUP, m)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -316,7 +514,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
break;
CASE_MOVDUP(MOVSHDUP, r)
- Src1Name = getRegName(MI->getOperand(MI->getNumOperands() - 1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
// FALL THROUGH.
CASE_MOVDUP(MOVSHDUP, m)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -324,7 +522,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
break;
CASE_MOVDUP(MOVDDUP, r)
- Src1Name = getRegName(MI->getOperand(MI->getNumOperands() - 1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
// FALL THROUGH.
CASE_MOVDUP(MOVDDUP, m)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -334,83 +532,80 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSLLDQri:
case X86::VPSLLDQri:
case X86::VPSLLDQYri:
+ case X86::VPSLLDQZ128rr:
+ case X86::VPSLLDQZ256rr:
+ case X86::VPSLLDQZ512rr:
Src1Name = getRegName(MI->getOperand(1).getReg());
+ case X86::VPSLLDQZ128rm:
+ case X86::VPSLLDQZ256rm:
+ case X86::VPSLLDQZ512rm:
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSLLDQMask(getRegOperandVectorVT(MI, MVT::i8, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
case X86::PSRLDQri:
case X86::VPSRLDQri:
case X86::VPSRLDQYri:
+ case X86::VPSRLDQZ128rr:
+ case X86::VPSRLDQZ256rr:
+ case X86::VPSRLDQZ512rr:
Src1Name = getRegName(MI->getOperand(1).getReg());
+ case X86::VPSRLDQZ128rm:
+ case X86::VPSRLDQZ256rm:
+ case X86::VPSRLDQZ512rm:
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSRLDQMask(getRegOperandVectorVT(MI, MVT::i8, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
- case X86::PALIGNR128rr:
- case X86::VPALIGNR128rr:
- case X86::VPALIGNR256rr:
- Src1Name = getRegName(MI->getOperand(2).getReg());
+ CASE_SHUF(PALIGNR, rri)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
// FALL THROUGH.
- case X86::PALIGNR128rm:
- case X86::VPALIGNR128rm:
- case X86::VPALIGNR256rm:
- Src2Name = getRegName(MI->getOperand(1).getReg());
+ CASE_SHUF(PALIGNR, rmi)
+ Src2Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePALIGNRMask(getRegOperandVectorVT(MI, MVT::i8, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
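A note on the NumOperands-(RegForm?3:7) style indexing used here and below: an x86 memory reference occupies five MCOperands (base, scale, index, displacement, segment), and the masked AVX-512 forms add a mask operand after the destination, so counting sources from the end of the operand list keeps one expression valid for every variant. A small worked sketch of the arithmetic, assuming the usual operand layouts of the plain forms:

    #include <cstdio>

    int main() {
      unsigned RegOps = 4; // VPALIGNRrri: dst, src1, src2, imm
      unsigned MemOps = 8; // VPALIGNRrmi: dst, src1, base, scale, index, disp, seg, imm
      std::printf("rri: src2 at %u, src1 at %u\n", RegOps - 2, RegOps - 3); // 2, 1
      std::printf("rmi: src1 at %u\n", MemOps - 7);                         // 1
      // AVX-512 masked forms insert a mask operand after the destination, which
      // shifts the absolute indices by one but leaves these end-relative ones valid.
      return 0;
    }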
- case X86::PSHUFDri:
- case X86::VPSHUFDri:
- case X86::VPSHUFDYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_SHUF(PSHUFD, ri)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- case X86::PSHUFDmi:
- case X86::VPSHUFDmi:
- case X86::VPSHUFDYmi:
+ CASE_SHUF(PSHUFD, mi)
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFMask(getRegOperandVectorVT(MI, MVT::i32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
- case X86::PSHUFHWri:
- case X86::VPSHUFHWri:
- case X86::VPSHUFHWYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_SHUF(PSHUFHW, ri)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- case X86::PSHUFHWmi:
- case X86::VPSHUFHWmi:
- case X86::VPSHUFHWYmi:
+ CASE_SHUF(PSHUFHW, mi)
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFHWMask(getRegOperandVectorVT(MI, MVT::i16, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
- case X86::PSHUFLWri:
- case X86::VPSHUFLWri:
- case X86::VPSHUFLWYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_SHUF(PSHUFLW, ri)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- case X86::PSHUFLWmi:
- case X86::VPSHUFLWmi:
- case X86::VPSHUFLWYmi:
+ CASE_SHUF(PSHUFLW, mi)
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFLWMask(getRegOperandVectorVT(MI, MVT::i16, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
@@ -419,9 +614,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// FALL THROUGH.
case X86::MMX_PSHUFWmi:
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFMask(MVT::v4i16,
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
@@ -435,188 +630,204 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_UNPCK(PUNPCKHBW, r)
case X86::MMX_PUNPCKHBWirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKHBW, m)
case X86::MMX_PUNPCKHBWirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::i8, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKHWD, r)
case X86::MMX_PUNPCKHWDirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKHWD, m)
case X86::MMX_PUNPCKHWDirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::i16, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKHDQ, r)
case X86::MMX_PUNPCKHDQirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKHDQ, m)
case X86::MMX_PUNPCKHDQirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::i32, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKHQDQ, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKHQDQ, m)
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::i64, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKLBW, r)
case X86::MMX_PUNPCKLBWirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKLBW, m)
case X86::MMX_PUNPCKLBWirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::i8, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKLWD, r)
case X86::MMX_PUNPCKLWDirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKLWD, m)
case X86::MMX_PUNPCKLWDirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::i16, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKLDQ, r)
case X86::MMX_PUNPCKLDQirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKLDQ, m)
case X86::MMX_PUNPCKLDQirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::i32, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKLQDQ, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKLQDQ, m)
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::i64, 0), ShuffleMask);
break;
- CASE_SHUF(SHUFPD, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ CASE_SHUF(SHUFPD, rri)
+ Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
// FALL THROUGH.
- CASE_SHUF(SHUFPD, m)
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ CASE_SHUF(SHUFPD, rmi)
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeSHUFPMask(getRegOperandVectorVT(MI, MVT::f64, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
- CASE_SHUF(SHUFPS, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ CASE_SHUF(SHUFPS, rri)
+ Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
// FALL THROUGH.
- CASE_SHUF(SHUFPS, m)
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ CASE_SHUF(SHUFPS, rmi)
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeSHUFPMask(getRegOperandVectorVT(MI, MVT::f32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
CASE_VSHUF(64X2, r)
+ Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
+ // FALL THROUGH.
CASE_VSHUF(64X2, m)
+ decodeVSHUF64x2FamilyMask(getRegOperandVectorVT(MI, MVT::i64, 0),
+ MI->getOperand(NumOperands - 1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
CASE_VSHUF(32X4, r)
- CASE_VSHUF(32X4, m) {
- MVT VT;
- bool HasMemOp;
- unsigned NumOp = MI->getNumOperands();
- getVSHUF64x2FamilyInfo(MI, VT, HasMemOp);
- decodeVSHUF64x2FamilyMask(VT, MI->getOperand(NumOp - 1).getImm(),
+ Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
+ // FALL THROUGH.
+ CASE_VSHUF(32X4, m)
+ decodeVSHUF64x2FamilyMask(getRegOperandVectorVT(MI, MVT::i32, 0),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
- if (HasMemOp) {
- assert((NumOp >= 8) && "Expected at least 8 operands!");
- Src1Name = getRegName(MI->getOperand(NumOp - 7).getReg());
- } else {
- assert((NumOp >= 4) && "Expected at least 4 operands!");
- Src2Name = getRegName(MI->getOperand(NumOp - 2).getReg());
- Src1Name = getRegName(MI->getOperand(NumOp - 3).getReg());
- }
break;
- }
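decodeVSHUF64x2FamilyMask works in whole 128-bit lanes: each destination lane picks a source lane from a small bitfield of the immediate, with the lower half of the destination drawn from the first source and the upper half from the second. A hedged standalone sketch of that decoding (illustrative layout assumptions, not the LLVM helper):

    #include <cstdio>
    #include <vector>

    // NumLanes 128-bit lanes, EltsPerLane elements each. Destination lane l
    // reads source lane (Imm >> (l*Bits)) & (NumLanes-1); the upper half of
    // the destination takes its lanes from the second source.
    static std::vector<int> vshuf64x2Mask(unsigned NumLanes, unsigned EltsPerLane,
                                          unsigned Imm) {
      unsigned Bits = NumLanes / 2;
      std::vector<int> Mask;
      for (unsigned l = 0; l != NumLanes; ++l) {
        unsigned Src = (Imm >> (l * Bits)) & (NumLanes - 1);
        if (l >= NumLanes / 2)
          Src += NumLanes; // second source
        for (unsigned i = 0; i != EltsPerLane; ++i)
          Mask.push_back(int(Src * EltsPerLane + i));
      }
      return Mask;
    }

    int main() {
      // 512-bit, 64-bit elements, imm 0x44: dst = src1[lane0], src1[lane1], src2[lane0], src2[lane1]
      for (int M : vshuf64x2Mask(4, 2, 0x44))
        std::printf("%d ", M); // prints: 0 1 2 3 8 9 10 11
      std::printf("\n");
    }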
CASE_UNPCK(UNPCKLPD, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(UNPCKLPD, m)
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::f64, 0), ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
CASE_UNPCK(UNPCKLPS, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(UNPCKLPS, m)
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::f32, 0), ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
CASE_UNPCK(UNPCKHPD, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(UNPCKHPD, m)
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::f64, 0), ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
CASE_UNPCK(UNPCKHPS, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(UNPCKHPS, m)
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::f32, 0), ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
- CASE_VPERM(PERMILPS, r)
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_VPERMILPI(PERMILPS, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- CASE_VPERM(PERMILPS, m)
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ CASE_VPERMILPI(PERMILPS, m)
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFMask(getRegOperandVectorVT(MI, MVT::f32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
- CASE_VPERM(PERMILPD, r)
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_VPERMILPI(PERMILPD, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- CASE_VPERM(PERMILPD, m)
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ CASE_VPERMILPI(PERMILPD, m)
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFMask(getRegOperandVectorVT(MI, MVT::f64, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -628,44 +839,58 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPERM2F128rm:
case X86::VPERM2I128rm:
// For instruction comments purpose, assume the 256-bit vector is v4i64.
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeVPERM2X128Mask(MVT::v4i64,
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
- case X86::VPERMQYri:
- case X86::VPERMPDYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_VPERM(PERMPD, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- case X86::VPERMQYmi:
- case X86::VPERMPDYmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
- DecodeVPERMMask(MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ CASE_VPERM(PERMPD, m)
+ if (MI->getOperand(NumOperands - 1).isImm())
+ DecodeVPERMMask(getRegOperandVectorVT(MI, MVT::f64, 0),
+ MI->getOperand(NumOperands - 1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ CASE_VPERM(PERMQ, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ // FALL THROUGH.
+ CASE_VPERM(PERMQ, m)
+ if (MI->getOperand(NumOperands - 1).isImm())
+ DecodeVPERMMask(getRegOperandVectorVT(MI, MVT::i64, 0),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::MOVSDrr:
case X86::VMOVSDrr:
+ case X86::VMOVSDZrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::MOVSDrm:
case X86::VMOVSDrm:
+ case X86::VMOVSDZrm:
DecodeScalarMoveMask(MVT::v2f64, nullptr == Src2Name, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::MOVSSrr:
case X86::VMOVSSrr:
+ case X86::VMOVSSZrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::MOVSSrm:
case X86::VMOVSSrm:
+ case X86::VMOVSSZrm:
DecodeScalarMoveMask(MVT::v4f32, nullptr == Src2Name, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -681,6 +906,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MOVZQI2PQIrm:
case X86::MOVZPQILo2PQIrm:
case X86::VMOVQI2PQIrm:
+ case X86::VMOVQI2PQIZrm:
case X86::VMOVZQI2PQIrm:
case X86::VMOVZPQILo2PQIrm:
case X86::VMOVZPQILo2PQIZrm:
@@ -690,6 +916,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MOVDI2PDIrm:
case X86::VMOVDI2PDIrm:
+ case X86::VMOVDI2PDIZrm:
DecodeZeroMoveLowMask(MVT::v4i32, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -717,49 +944,41 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
Src2Name = getRegName(MI->getOperand(2).getReg());
break;
- case X86::PMOVZXBWrr:
- case X86::PMOVZXBDrr:
- case X86::PMOVZXBQrr:
- case X86::PMOVZXWDrr:
- case X86::PMOVZXWQrr:
- case X86::PMOVZXDQrr:
- case X86::VPMOVZXBWrr:
- case X86::VPMOVZXBDrr:
- case X86::VPMOVZXBQrr:
- case X86::VPMOVZXWDrr:
- case X86::VPMOVZXWQrr:
- case X86::VPMOVZXDQrr:
- case X86::VPMOVZXBWYrr:
- case X86::VPMOVZXBDYrr:
- case X86::VPMOVZXBQYrr:
- case X86::VPMOVZXWDYrr:
- case X86::VPMOVZXWQYrr:
- case X86::VPMOVZXDQYrr:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ case X86::VBROADCASTF128:
+ case X86::VBROADCASTI128:
+ DecodeSubVectorBroadcast(MVT::v4f64, MVT::v2f64, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ CASE_PMOVZX(PMOVZXBW, r)
+ CASE_PMOVZX(PMOVZXBD, r)
+ CASE_PMOVZX(PMOVZXBQ, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ // FALL THROUGH.
+ CASE_PMOVZX(PMOVZXBW, m)
+ CASE_PMOVZX(PMOVZXBD, m)
+ CASE_PMOVZX(PMOVZXBQ, m)
+ DecodeZeroExtendMask(MVT::i8, getZeroExtensionResultType(MI), ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ CASE_PMOVZX(PMOVZXWD, r)
+ CASE_PMOVZX(PMOVZXWQ, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
// FALL THROUGH.
- case X86::PMOVZXBWrm:
- case X86::PMOVZXBDrm:
- case X86::PMOVZXBQrm:
- case X86::PMOVZXWDrm:
- case X86::PMOVZXWQrm:
- case X86::PMOVZXDQrm:
- case X86::VPMOVZXBWrm:
- case X86::VPMOVZXBDrm:
- case X86::VPMOVZXBQrm:
- case X86::VPMOVZXWDrm:
- case X86::VPMOVZXWQrm:
- case X86::VPMOVZXDQrm:
- case X86::VPMOVZXBWYrm:
- case X86::VPMOVZXBDYrm:
- case X86::VPMOVZXBQYrm:
- case X86::VPMOVZXWDYrm:
- case X86::VPMOVZXWQYrm:
- case X86::VPMOVZXDQYrm: {
- MVT SrcVT, DstVT;
- getZeroExtensionTypes(MI, SrcVT, DstVT);
- DecodeZeroExtendMask(SrcVT, DstVT, ShuffleMask);
- DestName = getRegName(MI->getOperand(0).getReg());
- } break;
+ CASE_PMOVZX(PMOVZXWD, m)
+ CASE_PMOVZX(PMOVZXWQ, m)
+ DecodeZeroExtendMask(MVT::i16, getZeroExtensionResultType(MI), ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ CASE_PMOVZX(PMOVZXDQ, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ // FALL THROUGH.
+ CASE_PMOVZX(PMOVZXDQ, m)
+ DecodeZeroExtendMask(MVT::i32, getZeroExtensionResultType(MI), ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
}
// The only comments we decode are shuffles, so give up if we were unable to
@@ -768,7 +987,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
return false;
if (!DestName) DestName = Src1Name;
- OS << (DestName ? DestName : "mem") << " = ";
+ OS << (DestName ? getMaskName(MI, DestName, getRegName) : "mem") << " = ";
// If the two sources are the same, canonicalize the input elements to be
// from the first src so that we get larger element spans.
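For context, the switch above only records ShuffleMask, the source names and DestName; the printing code after this hunk turns them into the trailing asm comments. A very rough standalone sketch of that rendering (the real printer also merges runs, prints zero elements and handles two sources):

    #include <cstdio>
    #include <string>
    #include <vector>

    static std::string renderComment(const std::string &Dest, const std::string &Src,
                                     const std::vector<int> &Mask) {
      std::string S = Dest + " = " + Src + "[";
      for (size_t i = 0; i != Mask.size(); ++i)
        S += (i ? "," : "") + std::to_string(Mask[i]);
      return S + "]";
    }

    int main() {
      // e.g. a vmovddup-style duplication of element 0:
      std::puts(renderComment("xmm0", "xmm1", {0, 0}).c_str()); // xmm0 = xmm1[0,0]
    }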
diff --git a/lib/Target/X86/MCTargetDesc/CMakeLists.txt b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
index 129c28d804ef8..33df9ec7dcde7 100644
--- a/lib/Target/X86/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
@@ -7,6 +7,4 @@ add_llvm_library(LLVMX86Desc
X86ELFObjectWriter.cpp
X86WinCOFFStreamer.cpp
X86WinCOFFObjectWriter.cpp
- X86MachORelocationInfo.cpp
- X86ELFRelocationInfo.cpp
)
diff --git a/lib/Target/X86/MCTargetDesc/Makefile b/lib/Target/X86/MCTargetDesc/Makefile
deleted file mode 100644
index b19774ee379e1..0000000000000
--- a/lib/Target/X86/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/X86/TargetDesc/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86Desc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 135c32bf8c3b0..e77a0dc9bc27a 100644
--- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -21,7 +21,7 @@
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/Support/CommandLine.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
@@ -43,8 +43,11 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
return 1;
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
case X86::reloc_global_offset_table:
case FK_SecRel_4:
case FK_Data_4:
@@ -72,7 +75,8 @@ class X86AsmBackend : public MCAsmBackend {
const uint64_t MaxNopLength;
public:
X86AsmBackend(const Target &T, StringRef CPU)
- : MCAsmBackend(), CPU(CPU), MaxNopLength(CPU == "slm" ? 7 : 15) {
+ : MCAsmBackend(), CPU(CPU),
+ MaxNopLength((CPU == "slm" || CPU == "lakemont") ? 7 : 15) {
HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
@@ -86,10 +90,14 @@ public:
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
- { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
- { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
- { "reloc_signed_4byte", 0, 4 * 8, 0},
- { "reloc_global_offset_table", 0, 4 * 8, 0}
+ {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_signed_4byte", 0, 32, 0},
+ {"reloc_signed_4byte_relax", 0, 32, 0},
+ {"reloc_global_offset_table", 0, 32, 0},
+ {"reloc_global_offset_table8", 0, 64, 0},
};
if (Kind < FirstTargetFixupKind)
@@ -124,38 +132,57 @@ public:
const MCRelaxableFragment *DF,
const MCAsmLayout &Layout) const override;
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override;
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
};
} // end anonymous namespace
-static unsigned getRelaxedOpcodeBranch(unsigned Op) {
+static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool is16BitMode) {
+ unsigned Op = Inst.getOpcode();
switch (Op) {
default:
return Op;
-
- case X86::JAE_1: return X86::JAE_4;
- case X86::JA_1: return X86::JA_4;
- case X86::JBE_1: return X86::JBE_4;
- case X86::JB_1: return X86::JB_4;
- case X86::JE_1: return X86::JE_4;
- case X86::JGE_1: return X86::JGE_4;
- case X86::JG_1: return X86::JG_4;
- case X86::JLE_1: return X86::JLE_4;
- case X86::JL_1: return X86::JL_4;
- case X86::JMP_1: return X86::JMP_4;
- case X86::JNE_1: return X86::JNE_4;
- case X86::JNO_1: return X86::JNO_4;
- case X86::JNP_1: return X86::JNP_4;
- case X86::JNS_1: return X86::JNS_4;
- case X86::JO_1: return X86::JO_4;
- case X86::JP_1: return X86::JP_4;
- case X86::JS_1: return X86::JS_4;
+ case X86::JAE_1:
+ return (is16BitMode) ? X86::JAE_2 : X86::JAE_4;
+ case X86::JA_1:
+ return (is16BitMode) ? X86::JA_2 : X86::JA_4;
+ case X86::JBE_1:
+ return (is16BitMode) ? X86::JBE_2 : X86::JBE_4;
+ case X86::JB_1:
+ return (is16BitMode) ? X86::JB_2 : X86::JB_4;
+ case X86::JE_1:
+ return (is16BitMode) ? X86::JE_2 : X86::JE_4;
+ case X86::JGE_1:
+ return (is16BitMode) ? X86::JGE_2 : X86::JGE_4;
+ case X86::JG_1:
+ return (is16BitMode) ? X86::JG_2 : X86::JG_4;
+ case X86::JLE_1:
+ return (is16BitMode) ? X86::JLE_2 : X86::JLE_4;
+ case X86::JL_1:
+ return (is16BitMode) ? X86::JL_2 : X86::JL_4;
+ case X86::JMP_1:
+ return (is16BitMode) ? X86::JMP_2 : X86::JMP_4;
+ case X86::JNE_1:
+ return (is16BitMode) ? X86::JNE_2 : X86::JNE_4;
+ case X86::JNO_1:
+ return (is16BitMode) ? X86::JNO_2 : X86::JNO_4;
+ case X86::JNP_1:
+ return (is16BitMode) ? X86::JNP_2 : X86::JNP_4;
+ case X86::JNS_1:
+ return (is16BitMode) ? X86::JNS_2 : X86::JNS_4;
+ case X86::JO_1:
+ return (is16BitMode) ? X86::JO_2 : X86::JO_4;
+ case X86::JP_1:
+ return (is16BitMode) ? X86::JP_2 : X86::JP_4;
+ case X86::JS_1:
+ return (is16BitMode) ? X86::JS_2 : X86::JS_4;
}
}
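In other words, a rel8 branch that can no longer reach its target is rewritten to the near form, and the near displacement width now follows the code-size mode. A tiny sketch of the size arithmetic involved, using the usual x86 encodings as the assumption:

    #include <cstdio>

    // JMP rel8 is 2 bytes; the relaxed near form is 3 bytes in 16-bit mode
    // (rel16 displacement) and 5 bytes otherwise (rel32 displacement).
    static unsigned relaxedJmpLength(bool Is16BitMode) {
      return Is16BitMode ? 3 : 5;
    }

    int main() {
      std::printf("short jmp: 2 bytes, relaxed: %u (16-bit) / %u (32/64-bit)\n",
                  relaxedJmpLength(true), relaxedJmpLength(false));
    }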
-static unsigned getRelaxedOpcodeArith(unsigned Op) {
+static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
+ unsigned Op = Inst.getOpcode();
switch (Op) {
default:
return Op;
@@ -239,20 +266,20 @@ static unsigned getRelaxedOpcodeArith(unsigned Op) {
}
}
-static unsigned getRelaxedOpcode(unsigned Op) {
- unsigned R = getRelaxedOpcodeArith(Op);
- if (R != Op)
+static unsigned getRelaxedOpcode(const MCInst &Inst, bool is16BitMode) {
+ unsigned R = getRelaxedOpcodeArith(Inst);
+ if (R != Inst.getOpcode())
return R;
- return getRelaxedOpcodeBranch(Op);
+ return getRelaxedOpcodeBranch(Inst, is16BitMode);
}
bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
- // Branches can always be relaxed.
- if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
+ // Branches can always be relaxed in either mode.
+ if (getRelaxedOpcodeBranch(Inst, false) != Inst.getOpcode())
return true;
// Check if this instruction is ever relaxable.
- if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
+ if (getRelaxedOpcodeArith(Inst) == Inst.getOpcode())
return false;
@@ -275,9 +302,12 @@ bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
-void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
+void X86AsmBackend::relaxInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI,
+ MCInst &Res) const {
// The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
- unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());
+ bool is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
+ unsigned RelaxedOp = getRelaxedOpcode(Inst, is16BitMode);
if (RelaxedOp == Inst.getOpcode()) {
SmallString<256> Tmp;
@@ -405,6 +435,14 @@ public:
, Is64Bit(is64Bit) {
}
+ Optional<MCFixupKind> getFixupKind(StringRef Name) const override {
+ return StringSwitch<Optional<MCFixupKind>>(Name)
+ .Case("dir32", FK_Data_4)
+ .Case("secrel32", FK_SecRel_4)
+ .Case("secidx", FK_SecRel_2)
+ .Default(MCAsmBackend::getFixupKind(Name));
+ }
+
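A standalone sketch of the name lookup added above, assuming these names reach the backend from a .reloc-style directive; unknown names fall through to the generic handling (modeled here as "no fixup"):

    #include <cstdio>
    #include <string>

    enum Fixup { None, Data_4, SecRel_4, SecRel_2 };

    static Fixup getCOFFFixupKind(const std::string &Name) {
      if (Name == "dir32")    return Data_4;   // image-relative 32-bit data
      if (Name == "secrel32") return SecRel_4; // 32-bit section-relative
      if (Name == "secidx")   return SecRel_2; // 16-bit section index
      return None; // defer to the base implementation in the real backend
    }

    int main() { std::printf("%d\n", getCOFFFixupKind("secrel32")); } // SecRel_4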
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createX86WinCOFFObjectWriter(OS, Is64Bit);
}
@@ -803,7 +841,7 @@ MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
if (TheTriple.isOSBinFormatMachO())
return new DarwinX86_32AsmBackend(T, MRI, CPU);
- if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
+ if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
return new WindowsX86AsmBackend(T, false, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
@@ -826,7 +864,7 @@ MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
return new DarwinX86_64AsmBackend(T, MRI, CPU, CS);
}
- if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
+ if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
return new WindowsX86AsmBackend(T, true, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 9ff85b9154f8a..b4195176f9042 100644
--- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -186,11 +186,6 @@ namespace X86II {
/// dllimport linkage on windows.
MO_DLLIMPORT,
- /// MO_DARWIN_STUB - On a symbol operand "FOO", this indicates that the
- /// reference is actually to the "FOO$stub" symbol. This is used for calls
- /// and jumps to external functions on Tiger and earlier.
- MO_DARWIN_STUB,
-
/// MO_DARWIN_NONLAZY - On a symbol operand "FOO", this indicates that the
/// reference is actually to the "FOO$non_lazy_ptr" symbol, which is a
/// non-PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
@@ -201,12 +196,6 @@ namespace X86II {
/// a PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
MO_DARWIN_NONLAZY_PIC_BASE,
- /// MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE - On a symbol operand "FOO", this
- /// indicates that the reference is actually to "FOO$non_lazy_ptr -PICBASE",
- /// which is a PIC-base-relative reference to a hidden dyld lazy pointer
- /// stub.
- MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE,
-
/// MO_TLVP - On a symbol operand this indicates that the immediate is
/// some TLS offset.
///
@@ -667,7 +656,7 @@ namespace X86II {
/// is duplicated in the MCInst (e.g. "EAX = addl EAX, [mem]") it is only
/// counted as one operand.
///
- inline int getMemoryOperandNo(uint64_t TSFlags, unsigned Opcode) {
+ inline int getMemoryOperandNo(uint64_t TSFlags) {
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
bool HasMemOp4 = TSFlags & X86II::MemOp4;
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
@@ -734,12 +723,12 @@ namespace X86II {
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended (r8 or
/// higher) register? e.g. r8, xmm8, xmm13, etc.
inline bool isX86_64ExtendedReg(unsigned RegNo) {
- if ((RegNo > X86::XMM7 && RegNo <= X86::XMM15) ||
- (RegNo > X86::XMM23 && RegNo <= X86::XMM31) ||
- (RegNo > X86::YMM7 && RegNo <= X86::YMM15) ||
- (RegNo > X86::YMM23 && RegNo <= X86::YMM31) ||
- (RegNo > X86::ZMM7 && RegNo <= X86::ZMM15) ||
- (RegNo > X86::ZMM23 && RegNo <= X86::ZMM31))
+ if ((RegNo >= X86::XMM8 && RegNo <= X86::XMM15) ||
+ (RegNo >= X86::XMM24 && RegNo <= X86::XMM31) ||
+ (RegNo >= X86::YMM8 && RegNo <= X86::YMM15) ||
+ (RegNo >= X86::YMM24 && RegNo <= X86::YMM31) ||
+ (RegNo >= X86::ZMM8 && RegNo <= X86::ZMM15) ||
+ (RegNo >= X86::ZMM24 && RegNo <= X86::ZMM31))
return true;
switch (RegNo) {
@@ -762,9 +751,9 @@ namespace X86II {
/// is32ExtendedReg - Is the MemoryOperand a 32 extended (zmm16 or higher)
/// registers? e.g. zmm21, etc.
static inline bool is32ExtendedReg(unsigned RegNo) {
- return ((RegNo > X86::XMM15 && RegNo <= X86::XMM31) ||
- (RegNo > X86::YMM15 && RegNo <= X86::YMM31) ||
- (RegNo > X86::ZMM15 && RegNo <= X86::ZMM31));
+ return ((RegNo >= X86::XMM16 && RegNo <= X86::XMM31) ||
+ (RegNo >= X86::YMM16 && RegNo <= X86::YMM31) ||
+ (RegNo >= X86::ZMM16 && RegNo <= X86::ZMM31));
}
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index 736c39dfb6f13..da69da51df108 100644
--- a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -9,6 +9,8 @@
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCValue.h"
@@ -25,8 +27,8 @@ namespace {
~X86ELFObjectWriter() override;
protected:
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
};
}
@@ -56,6 +58,7 @@ static X86_64RelType getType64(unsigned Kind,
case FK_Data_8:
return RT64_64;
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_None && !IsPCRel)
return RT64_32S;
return RT64_32;
@@ -66,6 +69,8 @@ static X86_64RelType getType64(unsigned Kind,
case FK_Data_4:
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
return RT64_32;
case FK_PCRel_2:
@@ -77,8 +82,16 @@ static X86_64RelType getType64(unsigned Kind,
}
}
-static unsigned getRelocType64(MCSymbolRefExpr::VariantKind Modifier,
- X86_64RelType Type, bool IsPCRel) {
+static void checkIs32(MCContext &Ctx, SMLoc Loc, X86_64RelType Type) {
+ if (Type != RT64_32)
+ Ctx.reportError(Loc,
+ "32 bit reloc applied to a field with a different size");
+}
+
+static unsigned getRelocType64(MCContext &Ctx, SMLoc Loc,
+ MCSymbolRefExpr::VariantKind Modifier,
+ X86_64RelType Type, bool IsPCRel,
+ unsigned Kind) {
switch (Modifier) {
default:
llvm_unreachable("Unimplemented");
@@ -146,21 +159,38 @@ static unsigned getRelocType64(MCSymbolRefExpr::VariantKind Modifier,
case RT64_8:
llvm_unreachable("Unimplemented");
}
+ case MCSymbolRefExpr::VK_TLSCALL:
+ return ELF::R_X86_64_TLSDESC_CALL;
+ case MCSymbolRefExpr::VK_TLSDESC:
+ return ELF::R_X86_64_GOTPC32_TLSDESC;
case MCSymbolRefExpr::VK_TLSGD:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_TLSGD;
case MCSymbolRefExpr::VK_GOTTPOFF:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_GOTTPOFF;
case MCSymbolRefExpr::VK_TLSLD:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_TLSLD;
case MCSymbolRefExpr::VK_PLT:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_PLT32;
case MCSymbolRefExpr::VK_GOTPCREL:
- assert(Type == RT64_32);
- return ELF::R_X86_64_GOTPCREL;
+ checkIs32(Ctx, Loc, Type);
+    // Older versions of ld.bfd/ld.gold/lld do not support GOTPCRELX and
+    // REX_GOTPCRELX, and we want to keep backwards compatibility.
+ if (!Ctx.getAsmInfo()->canRelaxRelocations())
+ return ELF::R_X86_64_GOTPCREL;
+ switch (Kind) {
+ default:
+ return ELF::R_X86_64_GOTPCREL;
+ case X86::reloc_riprel_4byte_relax:
+ return ELF::R_X86_64_GOTPCRELX;
+ case X86::reloc_riprel_4byte_relax_rex:
+ case X86::reloc_riprel_4byte_movq_load:
+ return ELF::R_X86_64_REX_GOTPCRELX;
+ }
}
}
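The net effect: when the assembler is allowed to emit relaxable relocations, a GOT load such as movq foo@GOTPCREL(%rip), %rax gets R_X86_64_REX_GOTPCRELX, which permits the linker to rewrite it (for example into a lea) when the symbol turns out to be local; otherwise the plain R_X86_64_GOTPCREL keeps older linkers working. A standalone sketch of the selection logic above (enum names are illustrative):

    #include <cstdio>

    enum Fixup { riprel_4byte, riprel_4byte_relax, riprel_4byte_relax_rex,
                 riprel_4byte_movq_load };
    enum Reloc { GOTPCREL, GOTPCRELX, REX_GOTPCRELX };

    static Reloc pickGotPcRelReloc(bool CanRelaxRelocations, Fixup Kind) {
      if (!CanRelaxRelocations)
        return GOTPCREL; // keep older ld.bfd/ld.gold/lld working
      switch (Kind) {
      case riprel_4byte_relax:     return GOTPCRELX;
      case riprel_4byte_relax_rex:
      case riprel_4byte_movq_load: return REX_GOTPCRELX;
      default:                     return GOTPCREL;
      }
    }

    int main() {
      // e.g. "movq foo@GOTPCREL(%rip), %rax" carries the movq-load fixup:
      std::printf("%d\n", pickGotPcRelReloc(true, riprel_4byte_movq_load)); // REX_GOTPCRELX
    }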
@@ -181,8 +211,10 @@ static X86_32RelType getType32(X86_64RelType T) {
llvm_unreachable("unexpected relocation type!");
}
-static unsigned getRelocType32(MCSymbolRefExpr::VariantKind Modifier,
- X86_32RelType Type, bool IsPCRel) {
+static unsigned getRelocType32(MCContext &Ctx,
+ MCSymbolRefExpr::VariantKind Modifier,
+ X86_32RelType Type, bool IsPCRel,
+ unsigned Kind) {
switch (Modifier) {
default:
llvm_unreachable("Unimplemented");
@@ -197,7 +229,15 @@ static unsigned getRelocType32(MCSymbolRefExpr::VariantKind Modifier,
}
case MCSymbolRefExpr::VK_GOT:
assert(Type == RT32_32);
- return IsPCRel ? ELF::R_386_GOTPC : ELF::R_386_GOT32;
+ if (IsPCRel)
+ return ELF::R_386_GOTPC;
+ // Older versions of ld.bfd/ld.gold/lld do not support R_386_GOT32X and we
+ // want to maintain compatibility.
+ if (!Ctx.getAsmInfo()->canRelaxRelocations())
+ return ELF::R_386_GOT32;
+
+ return Kind == X86::reloc_signed_4byte_relax ? ELF::R_386_GOT32X
+ : ELF::R_386_GOT32;
case MCSymbolRefExpr::VK_GOTOFF:
assert(Type == RT32_32);
assert(!IsPCRel);
@@ -240,17 +280,18 @@ static unsigned getRelocType32(MCSymbolRefExpr::VariantKind Modifier,
}
}
-unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
+unsigned X86ELFObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant();
- X86_64RelType Type = getType64(Fixup.getKind(), Modifier, IsPCRel);
+ unsigned Kind = Fixup.getKind();
+ X86_64RelType Type = getType64(Kind, Modifier, IsPCRel);
if (getEMachine() == ELF::EM_X86_64)
- return getRelocType64(Modifier, Type, IsPCRel);
+ return getRelocType64(Ctx, Fixup.getLoc(), Modifier, Type, IsPCRel, Kind);
assert((getEMachine() == ELF::EM_386 || getEMachine() == ELF::EM_IAMCU) &&
"Unsupported ELF machine type.");
- return getRelocType32(Modifier, getType32(Type), IsPCRel);
+ return getRelocType32(Ctx, Modifier, getType32(Type), IsPCRel, Kind);
}
MCObjectWriter *llvm::createX86ELFObjectWriter(raw_pwrite_stream &OS,
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
deleted file mode 100644
index ddb764facdbfa..0000000000000
--- a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-//===-- X86ELFRelocationInfo.cpp ----------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/X86MCTargetDesc.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCRelocationInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Object/ELFObjectFile.h"
-#include "llvm/Support/ELF.h"
-
-using namespace llvm;
-using namespace object;
-using namespace ELF;
-
-namespace {
-class X86_64ELFRelocationInfo : public MCRelocationInfo {
-public:
- X86_64ELFRelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
-
- const MCExpr *createExprForRelocation(RelocationRef Rel) override {
- uint64_t RelType = Rel.getType();
- elf_symbol_iterator SymI = Rel.getSymbol();
-
- ErrorOr<StringRef> SymNameOrErr = SymI->getName();
- if (std::error_code EC = SymNameOrErr.getError())
- report_fatal_error(EC.message());
- StringRef SymName = *SymNameOrErr;
-
- ErrorOr<uint64_t> SymAddr = SymI->getAddress();
- if (std::error_code EC = SymAddr.getError())
- report_fatal_error(EC.message());
- uint64_t SymSize = SymI->getSize();
- int64_t Addend = *ELFRelocationRef(Rel).getAddend();
-
- MCSymbol *Sym = Ctx.getOrCreateSymbol(SymName);
- // FIXME: check that the value is actually the same.
- if (!Sym->isVariable())
- Sym->setVariableValue(MCConstantExpr::create(*SymAddr, Ctx));
-
- const MCExpr *Expr = nullptr;
- // If hasAddend is true, then we need to add Addend (r_addend) to Expr.
- bool hasAddend = false;
-
- // The AMD64 SysV ABI says:
- // A: the addend used to compute the value of the relocatable field.
- // B: the base address at which a shared object has been loaded into memory
- // during execution. Generally, a shared object is built with a 0 base
- // virtual address, but the execution address will be different.
- // G: the offset into the global offset table at which the relocation
- // entry's symbol will reside during execution.
- // GOT: the address of the global offset table.
- // L: the place (section offset or address) of the Procedure Linkage Table
- // entry for a symbol.
- // P: the place (section offset or address) of the storage unit being
- // relocated (computed using r_offset).
- // S: the value of the symbol whose index resides in the relocation entry.
- // Z: the size of the symbol whose index resides in the relocation entry.
-
- switch(RelType) {
- case R_X86_64_NONE:
- case R_X86_64_COPY:
- // none
- break;
- case R_X86_64_64:
- case R_X86_64_16:
- case R_X86_64_8:
- // S + A
- case R_X86_64_32:
- case R_X86_64_32S:
- // S + A (We don't care about the result not fitting in 32 bits.)
- case R_X86_64_PC32:
- case R_X86_64_PC16:
- case R_X86_64_PC8:
- case R_X86_64_PC64:
- // S + A - P (P/pcrel is implicit)
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- case R_X86_64_GOT32:
- case R_X86_64_GOT64:
- case R_X86_64_GOTPC32:
- case R_X86_64_GOTPC64:
- case R_X86_64_GOTPLT64:
- // G + A
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOT, Ctx);
- break;
- case R_X86_64_PLT32:
- // L + A - P -> S@PLT + A
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_PLT, Ctx);
- break;
- case R_X86_64_GLOB_DAT:
- case R_X86_64_JUMP_SLOT:
- // S
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- case R_X86_64_GOTPCREL:
- case R_X86_64_GOTPCREL64:
- // G + GOT + A - P -> S@GOTPCREL + A
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
- break;
- case R_X86_64_GOTOFF64:
- // S + A - GOT
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOTOFF, Ctx);
- break;
- case R_X86_64_PLTOFF64:
- // L + A - GOT
- break;
- case R_X86_64_SIZE32:
- case R_X86_64_SIZE64:
- // Z + A
- Expr = MCConstantExpr::create(SymSize, Ctx);
- break;
- default:
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- }
- if (Expr && hasAddend && Addend != 0)
- Expr = MCBinaryExpr::createAdd(Expr,
- MCConstantExpr::create(Addend, Ctx),
- Ctx);
- return Expr;
- }
-};
-} // End unnamed namespace
-
-/// createX86_64ELFRelocationInfo - Construct an X86-64 ELF RelocationInfo.
-MCRelocationInfo *llvm::createX86_64ELFRelocationInfo(MCContext &Ctx) {
- // We only handle x86-64 for now.
- return new X86_64ELFRelocationInfo(Ctx);
-}
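As an aside on the deleted mapping: each ELF relocation formula above became an MCExpr with an optional symbol modifier plus the explicit addend. A toy rendering with plain strings (the symbol name and addend are made up for illustration; this is not the MCExpr API):

    #include <cassert>
    #include <string>
    int main() {
      // R_X86_64_PLT32 is L + A - P, which the class rendered as sym@PLT
      // plus the addend (P/pcrel being implicit in the fixup).
      std::string Sym = "foo";
      long Addend = -4;
      std::string Expr = Sym + "@PLT";
      if (Addend != 0)
        Expr += " + (" + std::to_string(Addend) + ")";
      assert(Expr == "foo@PLT + (-4)");
    }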
diff --git a/lib/Target/X86/MCTargetDesc/X86FixupKinds.h b/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
index 4899900dcef9d..dfdc9ec29aec7 100644
--- a/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
+++ b/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
@@ -17,9 +17,15 @@ namespace X86 {
enum Fixups {
reloc_riprel_4byte = FirstTargetFixupKind, // 32-bit rip-relative
reloc_riprel_4byte_movq_load, // 32-bit rip-relative in movq
+ reloc_riprel_4byte_relax, // 32-bit rip-relative in relaxable
+ // instruction
+ reloc_riprel_4byte_relax_rex, // 32-bit rip-relative in relaxable
+ // instruction with rex prefix
reloc_signed_4byte, // 32-bit signed. Unlike FK_Data_4
// this will be sign extended at
// runtime.
+ reloc_signed_4byte_relax, // like reloc_signed_4byte, but
+ // in a relaxable instruction.
reloc_global_offset_table, // 32-bit, relative to the start
// of the instruction. Used only
// for _GLOBAL_OFFSET_TABLE_.
diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index fc0b0f89e23df..b7c56cec2db89 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -35,7 +35,7 @@ AsmWriterFlavor("x86-asm-syntax", cl::init(ATT),
clEnumValEnd));
static cl::opt<bool>
-MarkedJTDataRegions("mark-data-regions", cl::init(false),
+MarkedJTDataRegions("mark-data-regions", cl::init(true),
cl::desc("Mark code section jump table data regions."),
cl::Hidden);
diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index dfab6ec10775c..96c2e81c332a9 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -76,36 +76,16 @@ public:
return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
}
- // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
- // 0-7 and the difference between the 2 groups is given by the REX prefix.
- // In the VEX prefix, registers are seen sequentially from 0-15 and encoded
- // in 1's complement form, example:
- //
- // ModRM field => XMM9 => 1
- // VEX.VVVV => XMM9 => ~9
- //
- // See table 4-35 of Intel AVX Programming Reference for details.
- unsigned char getVEXRegisterEncoding(const MCInst &MI,
- unsigned OpNum) const {
- unsigned SrcReg = MI.getOperand(OpNum).getReg();
- unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
- if (X86II::isX86_64ExtendedReg(SrcReg))
- SrcRegNum |= 8;
-
- // The registers represented through VEX_VVVV should
- // be encoded in 1's complement form.
- return (~SrcRegNum) & 0xf;
+ unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
+ return Ctx.getRegisterInfo()->getEncodingValue(
+ MI.getOperand(OpNum).getReg());
}
- unsigned char getWriteMaskRegisterEncoding(const MCInst &MI,
- unsigned OpNum) const {
- assert(X86::K0 != MI.getOperand(OpNum).getReg() &&
- "Invalid mask register as write-mask!");
- unsigned MaskRegNum = GetX86RegNum(MI.getOperand(OpNum));
- return MaskRegNum;
+ bool isX86_64ExtendedReg(const MCInst &MI, unsigned OpNum) const {
+ return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
}
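The two new helpers subsume the deleted VEX and write-mask encoders: once the raw encoding value is in hand, bit 3 is the REX/VEX extension bit and VEX.vvvv is just the low four bits in one's complement. A minimal standalone sketch (toy register numbers, not LLVM code):

    #include <cassert>
    static unsigned vexVVVV(unsigned Enc) { return ~Enc & 0xf; }
    static bool isExtended(unsigned Enc) { return (Enc >> 3) & 1; }
    int main() {
      // XMM9 has encoding 9: extended, and VEX.vvvv = ~9 & 0xf = 6.
      assert(isExtended(9) && vexVVVV(9) == 6);
      // XMM2 has encoding 2: not extended, VEX.vvvv = ~2 & 0xf = 13.
      assert(!isExtended(2) && vexVVVV(2) == 13);
    }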
- void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
+ void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
OS << (char)C;
++CurByte;
}
@@ -125,8 +105,8 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
int ImmOffset = 0) const;
- inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
- unsigned RM) {
+ inline static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode,
+ unsigned RM) {
assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
return RM | (RegOpcode << 3) | (Mod << 6);
}
@@ -142,11 +122,9 @@ public:
EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
}
-
- void EmitMemModRMByte(const MCInst &MI, unsigned Op,
- unsigned RegOpcodeField,
- uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups,
+ void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
+ uint64_t TSFlags, bool Rex, unsigned &CurByte,
+ raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
void encodeInstruction(const MCInst &MI, raw_ostream &OS,
@@ -160,10 +138,12 @@ public:
void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
const MCInst &MI, raw_ostream &OS) const;
- void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
+ bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
const MCInst &MI, const MCInstrDesc &Desc,
- const MCSubtargetInfo &STI,
- raw_ostream &OS) const;
+ const MCSubtargetInfo &STI, raw_ostream &OS) const;
+
+ uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
+ int MemOperand, const MCInstrDesc &Desc) const;
};
} // end anonymous namespace
@@ -177,7 +157,7 @@ MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
/// isDisp8 - Return true if this signed displacement fits in an 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
- return Value == (signed char)Value;
+ return Value == (int8_t)Value;
}
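The check is just a round trip through int8_t; a standalone sketch of the boundary cases:

    #include <cassert>
    #include <cstdint>
    static bool isDisp8(int Value) { return Value == (int8_t)Value; }
    int main() {
      assert(isDisp8(127) && isDisp8(-128));   // the signed byte range
      assert(!isDisp8(128) && !isDisp8(-129)); // these need a disp32
    }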
/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
@@ -198,7 +178,7 @@ static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
if (Value & Mask) // Unaligned offset
return false;
Value /= (int)CD8_Scale;
- bool Ret = (Value == (signed char)Value);
+ bool Ret = (Value == (int8_t)Value);
if (Ret)
CValue = Value;
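For EVEX the displacement is first divided by the element-derived scale (disp8*N); only aligned offsets whose quotient still fits in a byte are encodable. A standalone sketch with an assumed fixed scale (the real code derives it from TSFlags):

    #include <cassert>
    #include <cstdint>
    static bool isCDisp8(int Value, int Scale, int &CValue) {
      if (Value % Scale) return false;  // unaligned offsets need disp32
      int V = Value / Scale;
      if (V != (int8_t)V) return false; // quotient must fit in a byte
      CValue = V;
      return true;
    }
    int main() {
      int C;
      assert(isCDisp8(512, 64, C) && C == 8); // 512 = 8 * 64, encodable
      assert(!isCDisp8(520, 64, C));          // not a multiple of 64
      assert(!isCDisp8(64 * 200, 64, C));     // quotient overflows int8
    }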
@@ -231,6 +211,10 @@ static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
(IndexReg.getReg() != 0 &&
X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
return true;
+ if (BaseReg.getReg() == X86::EIP) {
+ assert(IndexReg.getReg() == 0 && "Invalid eip-based address.");
+ return true;
+ }
return false;
}
@@ -343,7 +327,9 @@ EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
// the start of the field, not the end of the field.
if (FixupKind == FK_PCRel_4 ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
- FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex))
ImmOffset -= 4;
if (FixupKind == FK_PCRel_2)
ImmOffset -= 2;
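The -4 adjustment can be checked with toy address arithmetic (addresses assumed for illustration): for a dword field that ends where the next instruction begins, the value measured from the field start minus 4 equals the rip-relative value the CPU expects.

    #include <cassert>
    #include <cstdint>
    int main() {
      // Instruction at 0x1000; its disp32 field occupies 0x1003..0x1006,
      // so the next instruction starts at 0x1007 (no trailing immediate).
      uint64_t FieldStart = 0x1003, NextIP = 0x1007, Target = 0x2000;
      int64_t Stored = (int64_t)(Target - FieldStart) - 4; // ImmOffset -= 4
      assert((int64_t)(Target - NextIP) == Stored);
    }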
@@ -359,12 +345,12 @@ EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
EmitConstant(0, Size, CurByte, OS);
}
-void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
+void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
unsigned RegOpcodeField,
- uint64_t TSFlags, unsigned &CurByte,
- raw_ostream &OS,
+ uint64_t TSFlags, bool Rex,
+ unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const{
+ const MCSubtargetInfo &STI) const {
const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
@@ -373,18 +359,38 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
// Handle %rip relative addressing.
- if (BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
+ if (BaseReg == X86::RIP ||
+ BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
- unsigned FixupKind = X86::reloc_riprel_4byte;
-
+ unsigned Opcode = MI.getOpcode();
// movq loads are handled with a special relocation form which allows the
// linker to eliminate some loads for GOT references which end up in the
// same linkage unit.
- if (MI.getOpcode() == X86::MOV64rm)
- FixupKind = X86::reloc_riprel_4byte_movq_load;
+ unsigned FixupKind = [=]() {
+ switch (Opcode) {
+ default:
+ return X86::reloc_riprel_4byte;
+ case X86::MOV64rm:
+ assert(Rex);
+ return X86::reloc_riprel_4byte_movq_load;
+ case X86::CALL64m:
+ case X86::JMP64m:
+ case X86::TEST64rm:
+ case X86::ADC64rm:
+ case X86::ADD64rm:
+ case X86::AND64rm:
+ case X86::CMP64rm:
+ case X86::OR64rm:
+ case X86::SBB64rm:
+ case X86::SUB64rm:
+ case X86::XOR64rm:
+ return Rex ? X86::reloc_riprel_4byte_relax_rex
+ : X86::reloc_riprel_4byte_relax;
+ }
+ }();
// rip-relative addressing is actually relative to the *next* instruction.
// Since an immediate can follow the mod/rm byte for an instruction, this
@@ -510,8 +516,11 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Otherwise, emit the most general non-SIB encoding: [REG+disp32]
EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
- EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
- CurByte, OS, Fixups);
+ unsigned Opcode = MI.getOpcode();
+ unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
+ : X86::reloc_signed_4byte;
+ EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
+ Fixups);
return;
}
@@ -603,26 +612,26 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 1: Same as REX_R=0 (must be 1 in 32-bit mode)
// 0: Same as REX_R=1 (64 bit mode only)
//
- unsigned char VEX_R = 0x1;
- unsigned char EVEX_R2 = 0x1;
+ uint8_t VEX_R = 0x1;
+ uint8_t EVEX_R2 = 0x1;
// VEX_X: equivalent to REX.X, only used when a
// register is used for index in SIB Byte.
//
// 1: Same as REX.X=0 (must be 1 in 32-bit mode)
// 0: Same as REX.X=1 (64-bit mode only)
- unsigned char VEX_X = 0x1;
+ uint8_t VEX_X = 0x1;
// VEX_B:
//
// 1: Same as REX_B=0 (ignored in 32-bit mode)
// 0: Same as REX_B=1 (64 bit mode only)
//
- unsigned char VEX_B = 0x1;
+ uint8_t VEX_B = 0x1;
// VEX_W: opcode specific (use like REX.W, or used for
// opcode extension, or ignored, depending on the opcode byte)
- unsigned char VEX_W = 0;
+ uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;
// VEX_5M (VEX m-mmmmm field):
//
@@ -634,20 +643,31 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0b01000: XOP map select - 08h instructions with imm byte
// 0b01001: XOP map select - 09h instructions with no imm byte
// 0b01010: XOP map select - 0Ah instructions with imm dword
- unsigned char VEX_5M = 0;
+ uint8_t VEX_5M;
+ switch (TSFlags & X86II::OpMapMask) {
+ default: llvm_unreachable("Invalid prefix!");
+ case X86II::TB: VEX_5M = 0x1; break; // 0F
+ case X86II::T8: VEX_5M = 0x2; break; // 0F 38
+ case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
+ case X86II::XOP8: VEX_5M = 0x8; break;
+ case X86II::XOP9: VEX_5M = 0x9; break;
+ case X86II::XOPA: VEX_5M = 0xA; break;
+ }
// VEX_4V (VEX vvvv field): a register specifier
// (in 1's complement form) or 1111 if unused.
- unsigned char VEX_4V = 0xf;
- unsigned char EVEX_V2 = 0x1;
+ uint8_t VEX_4V = 0xf;
+ uint8_t EVEX_V2 = 0x1;
- // VEX_L (Vector Length):
+ // EVEX_L2/VEX_L (Vector Length):
//
- // 0: scalar or 128-bit vector
- // 1: 256-bit vector
+ // L2 L
+ // 0 0: scalar or 128-bit vector
+ // 0 1: 256-bit vector
+ // 1 0: 512-bit vector
//
- unsigned char VEX_L = 0;
- unsigned char EVEX_L2 = 0;
+ uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
+ uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;
// VEX_PP: opcode extension providing equivalent
// functionality of a SIMD prefix
@@ -657,56 +677,32 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0b10: F3
// 0b11: F2
//
- unsigned char VEX_PP = 0;
+ uint8_t VEX_PP;
+ switch (TSFlags & X86II::OpPrefixMask) {
+ default: llvm_unreachable("Invalid op prefix!");
+ case X86II::PS: VEX_PP = 0x0; break; // none
+ case X86II::PD: VEX_PP = 0x1; break; // 66
+ case X86II::XS: VEX_PP = 0x2; break; // F3
+ case X86II::XD: VEX_PP = 0x3; break; // F2
+ }
// EVEX_U
- unsigned char EVEX_U = 1; // Always '1' so far
+ uint8_t EVEX_U = 1; // Always '1' so far
// EVEX_z
- unsigned char EVEX_z = 0;
+ uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;
// EVEX_b
- unsigned char EVEX_b = 0;
+ uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;
// EVEX_rc
- unsigned char EVEX_rc = 0;
+ uint8_t EVEX_rc = 0;
// EVEX_aaa
- unsigned char EVEX_aaa = 0;
+ uint8_t EVEX_aaa = 0;
bool EncodeRC = false;
- if (TSFlags & X86II::VEX_W)
- VEX_W = 1;
-
- if (TSFlags & X86II::VEX_L)
- VEX_L = 1;
- if (TSFlags & X86II::EVEX_L2)
- EVEX_L2 = 1;
-
- if (HasEVEX_K && (TSFlags & X86II::EVEX_Z))
- EVEX_z = 1;
-
- if ((TSFlags & X86II::EVEX_B))
- EVEX_b = 1;
-
- switch (TSFlags & X86II::OpPrefixMask) {
- default: break; // VEX_PP already correct
- case X86II::PD: VEX_PP = 0x1; break; // 66
- case X86II::XS: VEX_PP = 0x2; break; // F3
- case X86II::XD: VEX_PP = 0x3; break; // F2
- }
-
- switch (TSFlags & X86II::OpMapMask) {
- default: llvm_unreachable("Invalid prefix!");
- case X86II::TB: VEX_5M = 0x1; break; // 0F
- case X86II::T8: VEX_5M = 0x2; break; // 0F 38
- case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
- case X86II::XOP8: VEX_5M = 0x8; break;
- case X86II::XOP9: VEX_5M = 0x9; break;
- case X86II::XOPA: VEX_5M = 0xA; break;
- }
-
// Classify VEX_B, VEX_4V, VEX_R, VEX_X
unsigned NumOps = Desc.getNumOperands();
unsigned CurOp = X86II::getOperandBias(Desc);
@@ -721,38 +717,30 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// MemAddr, src1(VEX_4V), src2(ModR/M)
// MemAddr, src1(ModR/M), imm8
//
- if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrBaseReg).getReg()))
- VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
- VEX_X = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
- EVEX_V2 = 0x0;
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
+ if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
+ EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
CurOp += X86::AddrNumOperands;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
- const MCOperand &MO = MI.getOperand(CurOp);
- if (MO.isReg()) {
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MO.getReg()))
- EVEX_R2 = 0x0;
- }
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
break;
}
- case X86II::MRMSrcMem:
+ case X86II::MRMSrcMem: {
// MRMSrcMem instructions forms:
// src1(ModR/M), MemAddr
// src1(ModR/M), src2(VEX_4V), MemAddr
@@ -762,31 +750,25 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// FMA4:
// dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_R2 = 0x0;
- CurOp++;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
- VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
- VEX_X = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
- EVEX_V2 = 0x0;
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
+ if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
+ EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
if (HasVEX_4VOp3)
// Instruction format for 4VOp3:
@@ -794,8 +776,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// CurOp points to start of the MemoryOperand,
// it skips TIED_TO operands if exist, then increments past src1.
// CurOp + X86::AddrNumOperands will point to src3.
- VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
+ VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
break;
+ }
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
@@ -804,24 +787,21 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// MemAddr
// src1(VEX_4V), MemAddr
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
-
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
- VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
- VEX_X = 0x0;
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
+
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
break;
}
- case X86II::MRMSrcReg:
+ case X86II::MRMSrcReg: {
// MRMSrcReg instructions forms:
// dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M), src1(ModR/M)
@@ -830,32 +810,27 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// FMA4:
// dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_R2 = 0x0;
- CurOp++;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
if (HasMemOp4) // Skip second register source (encoded in I8IMM)
CurOp++;
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_X = 0x0;
- CurOp++;
+ RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+ VEX_X = ~(RegEnc >> 4) & 1;
if (HasVEX_4VOp3)
- VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
+ VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
if (EVEX_b) {
if (HasEVEX_RC) {
unsigned RcOperand = NumOps-1;
@@ -865,55 +840,52 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
EncodeRC = true;
}
break;
- case X86II::MRMDestReg:
+ }
+ case X86II::MRMDestReg: {
// MRMDestReg instructions forms:
// dst(ModR/M), src(ModR/M)
// dst(ModR/M), src(ModR/M), imm8
// dst(ModR/M), src1(VEX_4V), src2(ModR/M)
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_X = 0x0;
- CurOp++;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+ VEX_X = ~(RegEnc >> 4) & 1;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_R2 = 0x0;
+ RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
if (EVEX_b)
EncodeRC = true;
break;
+ }
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
- case X86II::MRM6r: case X86II::MRM7r:
+ case X86II::MRM6r: case X86II::MRM7r: {
// MRM0r-MRM7r instructions forms:
// dst(VEX_4V), src(ModR/M), imm8
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_X = 0x0;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+ VEX_X = ~(RegEnc >> 4) & 1;
break;
}
+ }
if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
// VEX opcode prefix can have 2 or 3 bytes
@@ -931,7 +903,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// +-----+ +--------------+ +-------------------+
// | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
// +-----+ +--------------+ +-------------------+
- unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
+ uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
// Can we use the 2 byte VEX prefix?
if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
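The packing itself is plain bit arithmetic; a standalone sketch with assumed field values showing when the two-byte C5 form applies and what its payload byte looks like:

    #include <cassert>
    #include <cstdint>
    int main() {
      uint8_t VEX_R = 1, VEX_X = 1, VEX_B = 1, VEX_W = 0, VEX_5M = 1;
      uint8_t VEX_4V = 0xf, VEX_L = 0, VEX_PP = 0;
      uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
      // 2-byte VEX needs X = B = 1 (no REX.X/B), W = 0 and the 0F map.
      if (VEX_B && VEX_X && !VEX_W && VEX_5M == 1) {
        uint8_t Payload = (VEX_R << 7) | LastByte;
        assert(Payload == 0xF8); // R=1, vvvv=1111, L=0, pp=00
      }
    }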
@@ -954,8 +926,6 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
assert((VEX_5M & 0x3) == VEX_5M
&& "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");
- VEX_5M &= 0x3;
-
EmitByte(0x62, CurByte, OS);
EmitByte((VEX_R << 7) |
(VEX_X << 6) |
@@ -968,26 +938,27 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
VEX_PP, CurByte, OS);
if (EncodeRC)
EmitByte((EVEX_z << 7) |
- (EVEX_rc << 5) |
- (EVEX_b << 4) |
- (EVEX_V2 << 3) |
- EVEX_aaa, CurByte, OS);
+ (EVEX_rc << 5) |
+ (EVEX_b << 4) |
+ (EVEX_V2 << 3) |
+ EVEX_aaa, CurByte, OS);
else
EmitByte((EVEX_z << 7) |
- (EVEX_L2 << 6) |
- (VEX_L << 5) |
- (EVEX_b << 4) |
- (EVEX_V2 << 3) |
- EVEX_aaa, CurByte, OS);
+ (EVEX_L2 << 6) |
+ (VEX_L << 5) |
+ (EVEX_b << 4) |
+ (EVEX_V2 << 3) |
+ EVEX_aaa, CurByte, OS);
}
}
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with an X86-64
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.
-static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
- const MCInstrDesc &Desc) {
- unsigned REX = 0;
+uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
+ int MemOperand,
+ const MCInstrDesc &Desc) const {
+ uint8_t REX = 0;
bool UsesHighByteReg = false;
if (TSFlags & X86II::REX_W)
@@ -996,13 +967,10 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
if (MI.getNumOperands() == 0) return REX;
unsigned NumOps = MI.getNumOperands();
- // FIXME: MCInst should explicitize the two-addrness.
- bool isTwoAddr = NumOps > 1 &&
- Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
+ unsigned CurOp = X86II::getOperandBias(Desc);
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
- unsigned i = isTwoAddr ? 1 : 0;
- for (; i != NumOps; ++i) {
+ for (unsigned i = CurOp; i != NumOps; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
@@ -1016,65 +984,44 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
}
switch (TSFlags & X86II::FormMask) {
+ case X86II::AddRegFrm:
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
+ break;
case X86II::MRMSrcReg:
- if (MI.getOperand(0).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2; // set REX.R
- i = isTwoAddr ? 2 : 1;
- for (; i != NumOps; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 0; // set REX.B
- }
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
break;
case X86II::MRMSrcMem: {
- if (MI.getOperand(0).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2; // set REX.R
- unsigned Bit = 0;
- i = isTwoAddr ? 2 : 1;
- for (; i != NumOps; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg()) {
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
- Bit++;
- }
- }
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
+ CurOp += X86::AddrNumOperands;
break;
}
+ case X86II::MRMDestReg:
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ break;
+ case X86II::MRMDestMem:
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
+ CurOp += X86::AddrNumOperands;
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ break;
case X86II::MRMXm:
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
- case X86II::MRMDestMem: {
- unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
- i = isTwoAddr ? 1 : 0;
- if (NumOps > e && MI.getOperand(e).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
- REX |= 1 << 2; // set REX.R
- unsigned Bit = 0;
- for (; i != e; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg()) {
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
- Bit++;
- }
- }
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
break;
- }
- default:
- if (MI.getOperand(0).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 0; // set REX.B
- i = isTwoAddr ? 2 : 1;
- for (unsigned e = NumOps; i != e; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 2; // set REX.R
- }
+ case X86II::MRMXr:
+ case X86II::MRM0r: case X86II::MRM1r:
+ case X86II::MRM2r: case X86II::MRM3r:
+ case X86II::MRM4r: case X86II::MRM5r:
+ case X86II::MRM6r: case X86II::MRM7r:
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
break;
}
if (REX && UsesHighByteReg)
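Put together, REX is 0100WRXB: W comes from TSFlags and R/X/B come from bit 3 of the relevant register encodings. A standalone sketch with made-up encodings (dst 9, index 12, base 3):

    #include <cassert>
    #include <cstdint>
    int main() {
      auto ext = [](unsigned Enc) { return (Enc >> 3) & 1; };
      uint8_t REX = 0;
      REX |= 1 << 3;       // REX.W: 64-bit operand size
      REX |= ext(9) << 2;  // REX.R from ModRM.reg (encoding 9)
      REX |= ext(12) << 1; // REX.X from the SIB index (encoding 12)
      REX |= ext(3) << 0;  // REX.B from the base (encoding 3: clear)
      assert((0x40 | REX) == 0x4E); // 0100 1110
    }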
@@ -1101,16 +1048,18 @@ void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
}
}
-/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
+/// Emit all instruction prefixes prior to the opcode.
///
/// MemOperand is the operand # of the start of a memory operand if present. If
/// not present, it is -1.
-void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+///
+/// Returns true if a REX prefix was used.
+bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
const MCSubtargetInfo &STI,
raw_ostream &OS) const {
-
+ bool Ret = false;
// Emit the operand size opcode prefix as needed.
if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
: X86II::OpSize16))
@@ -1135,8 +1084,10 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// Handle REX prefix.
// FIXME: Can this come before F2 etc to simplify emission?
if (is64BitMode(STI)) {
- if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
+ if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
EmitByte(0x40 | REX, CurByte, OS);
+ Ret = true;
+ }
}
// 0x0F escape code must be emitted just before the opcode.
@@ -1156,6 +1107,7 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
EmitByte(0x3A, CurByte, OS);
break;
}
+ return Ret;
}
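The operand-size test at the top of this function reads backwards at first glance: 0x66 toggles between 16- and 32-bit operand size, so it is emitted exactly when the instruction wants the size that is not the current mode's default. A standalone restatement of that condition:

    #include <cassert>
    enum OpSize { OpSize16, OpSize32 };
    static bool needs66(bool Is16BitMode, OpSize InstrSize) {
      return InstrSize == (Is16BitMode ? OpSize32 : OpSize16);
    }
    int main() {
      assert(needs66(false, OpSize16));  // 16-bit op in 32-bit mode
      assert(!needs66(false, OpSize32)); // mode default: no prefix
      assert(needs66(true, OpSize32));   // 32-bit op in 16-bit mode
    }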
void X86MCCodeEmitter::
@@ -1183,14 +1135,18 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
bool HasMemOp4 = TSFlags & X86II::MemOp4;
- const unsigned MemOp4_I8IMMOperand = 2;
+ bool HasVEX_I8IMM = TSFlags & X86II::VEX_I8IMM;
+ assert((!HasMemOp4 || HasVEX_I8IMM) && "MemOp4 should imply VEX_I8IMM");
// It uses the EVEX.aaa field?
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
+ // Used if a register is encoded in 7:4 of immediate.
+ unsigned I8RegNum = 0;
+
// Determine where the memory operand starts, if present.
- int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
+ int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
if (MemoryOperand != -1) MemoryOperand += CurOp;
// Emit segment override opcode prefix as needed.
@@ -1226,19 +1182,20 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (need_address_override)
EmitByte(0x67, CurByte, OS);
+ bool Rex = false;
if (Encoding == 0)
- EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
+ Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
else
EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
- unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
+ uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
if (TSFlags & X86II::Has3DNow0F0FOpcode)
BaseOpcode = 0x0F; // Weird 3DNow! encoding.
- unsigned SrcRegNum = 0;
- switch (TSFlags & X86II::FormMask) {
- default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
+ uint64_t Form = TSFlags & X86II::FormMask;
+ switch (Form) {
+ default: errs() << "FORM: " << Form << "\n";
llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
case X86II::Pseudo:
llvm_unreachable("Pseudo instruction shouldn't be emitted");
@@ -1315,12 +1272,12 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
break;
- case X86II::MRMDestReg:
+ case X86II::MRMDestReg: {
EmitByte(BaseOpcode, CurByte, OS);
- SrcRegNum = CurOp + 1;
+ unsigned SrcRegNum = CurOp + 1;
if (HasEVEX_K) // Skip writemask
- SrcRegNum++;
+ ++SrcRegNum;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
@@ -1329,71 +1286,68 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
CurOp = SrcRegNum + 1;
break;
-
- case X86II::MRMDestMem:
+ }
+ case X86II::MRMDestMem: {
EmitByte(BaseOpcode, CurByte, OS);
- SrcRegNum = CurOp + X86::AddrNumOperands;
+ unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
if (HasEVEX_K) // Skip writemask
- SrcRegNum++;
+ ++SrcRegNum;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
- EmitMemModRMByte(MI, CurOp,
- GetX86RegNum(MI.getOperand(SrcRegNum)),
- TSFlags, CurByte, OS, Fixups, STI);
+ emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
+ Rex, CurByte, OS, Fixups, STI);
CurOp = SrcRegNum + 1;
break;
-
- case X86II::MRMSrcReg:
+ }
+ case X86II::MRMSrcReg: {
EmitByte(BaseOpcode, CurByte, OS);
- SrcRegNum = CurOp + 1;
+ unsigned SrcRegNum = CurOp + 1;
if (HasEVEX_K) // Skip writemask
- SrcRegNum++;
+ ++SrcRegNum;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
- if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
- ++SrcRegNum;
+ if (HasMemOp4) // Capture 2nd src (which is encoded in I8IMM)
+ I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
EmitRegModRMByte(MI.getOperand(SrcRegNum),
GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
-
- // 2 operands skipped with HasMemOp4, compensate accordingly
- CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
+ CurOp = SrcRegNum + 1;
if (HasVEX_4VOp3)
++CurOp;
+ if (!HasMemOp4 && HasVEX_I8IMM)
+ I8RegNum = getX86RegEncoding(MI, CurOp++);
// do not count the rounding control operand
if (HasEVEX_RC)
- NumOps--;
+ --NumOps;
break;
-
+ }
case X86II::MRMSrcMem: {
- int AddrOperands = X86::AddrNumOperands;
unsigned FirstMemOp = CurOp+1;
- if (HasEVEX_K) { // Skip writemask
- ++AddrOperands;
+ if (HasEVEX_K) // Skip writemask
++FirstMemOp;
- }
- if (HasVEX_4V) {
- ++AddrOperands;
+ if (HasVEX_4V)
++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
- }
- if (HasMemOp4) // Skip second register source (encoded in I8IMM)
- ++FirstMemOp;
+
+ if (HasMemOp4) // Capture second register source (encoded in I8IMM)
+ I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
EmitByte(BaseOpcode, CurByte, OS);
- EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
- TSFlags, CurByte, OS, Fixups, STI);
- CurOp += AddrOperands + 1;
+ emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
+ TSFlags, Rex, CurByte, OS, Fixups, STI);
+ CurOp = FirstMemOp + X86::AddrNumOperands;
if (HasVEX_4VOp3)
++CurOp;
+ if (!HasMemOp4 && HasVEX_I8IMM)
+ I8RegNum = getX86RegEncoding(MI, CurOp++);
break;
}
@@ -1407,7 +1361,6 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (HasEVEX_K) // Skip writemask
++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
- uint64_t Form = TSFlags & X86II::FormMask;
EmitRegModRMByte(MI.getOperand(CurOp++),
(Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
CurByte, OS);
@@ -1424,9 +1377,9 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (HasEVEX_K) // Skip writemask
++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
- uint64_t Form = TSFlags & X86II::FormMask;
- EmitMemModRMByte(MI, CurOp, (Form == X86II::MRMXm) ? 0 : Form-X86II::MRM0m,
- TSFlags, CurByte, OS, Fixups, STI);
+ emitMemModRMByte(MI, CurOp,
+ (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
+ Rex, CurByte, OS, Fixups, STI);
CurOp += X86::AddrNumOperands;
break;
}
@@ -1453,38 +1406,27 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
case X86II::MRM_FF:
EmitByte(BaseOpcode, CurByte, OS);
-
- uint64_t Form = TSFlags & X86II::FormMask;
EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
break;
}
- // If there is a remaining operand, it must be a trailing immediate. Emit it
- // according to the right size for the instruction. Some instructions
- // (SSE4a extrq and insertq) have two trailing immediates.
- while (CurOp != NumOps && NumOps - CurOp <= 2) {
+ if (HasVEX_I8IMM) {
// The last source register of a 4 operand instruction in AVX is encoded
// in bits[7:4] of an immediate byte.
- if (TSFlags & X86II::VEX_I8IMM) {
- const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
- : CurOp);
- ++CurOp;
- unsigned RegNum = GetX86RegNum(MO) << 4;
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- RegNum |= 1 << 7;
- // If there is an additional 5th operand it must be an immediate, which
- // is encoded in bits[3:0]
- if (CurOp != NumOps) {
- const MCOperand &MIMM = MI.getOperand(CurOp++);
- if (MIMM.isImm()) {
- unsigned Val = MIMM.getImm();
- assert(Val < 16 && "Immediate operand value out of range");
- RegNum |= Val;
- }
- }
- EmitImmediate(MCOperand::createImm(RegNum), MI.getLoc(), 1, FK_Data_1,
- CurByte, OS, Fixups);
- } else {
+ assert(I8RegNum < 16 && "Register encoding out of range");
+ I8RegNum <<= 4;
+ if (CurOp != NumOps) {
+ unsigned Val = MI.getOperand(CurOp++).getImm();
+ assert(Val < 16 && "Immediate operand value out of range");
+ I8RegNum |= Val;
+ }
+ EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
+ CurByte, OS, Fixups);
+ } else {
+ // If there is a remaining operand, it must be a trailing immediate. Emit it
+ // according to the right size for the instruction. Some instructions
+ // (SSE4a extrq and insertq) have two trailing immediates.
+ while (CurOp != NumOps && NumOps - CurOp <= 2) {
EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
CurByte, OS, Fixups);
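For the VEX_I8IMM forms the fourth register source travels in the top nibble of the trailing immediate, with any real immediate packed below it. A standalone sketch with assumed values:

    #include <cassert>
    #include <cstdint>
    int main() {
      unsigned I8RegNum = 11; // assumed register encoding, must be < 16
      unsigned Imm = 0x3;     // assumed low-nibble immediate, < 16
      uint8_t Byte = (I8RegNum << 4) | Imm;
      assert(Byte == 0xB3);   // reg in bits[7:4], imm in bits[3:0]
    }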
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 53a6550acdd5c..311a8d677eeab 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -16,7 +16,6 @@
#include "InstPrinter/X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -66,12 +65,59 @@ unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
return DWARFFlavour::X86_32_Generic;
}
-void X86_MC::InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI) {
+void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
// FIXME: TableGen these.
- for (unsigned Reg = X86::NoRegister+1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
+ for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
unsigned SEH = MRI->getEncodingValue(Reg);
MRI->mapLLVMRegToSEHReg(Reg, SEH);
}
+
+ // These CodeView registers are numbered sequentially starting at value 1.
+ static const MCPhysReg LowCVRegs[] = {
+ X86::AL, X86::CL, X86::DL, X86::BL, X86::AH, X86::CH,
+ X86::DH, X86::BH, X86::AX, X86::CX, X86::DX, X86::BX,
+ X86::SP, X86::BP, X86::SI, X86::DI, X86::EAX, X86::ECX,
+ X86::EDX, X86::EBX, X86::ESP, X86::EBP, X86::ESI, X86::EDI,
+ };
+ unsigned CVLowRegStart = 1;
+ for (unsigned I = 0; I < array_lengthof(LowCVRegs); ++I)
+ MRI->mapLLVMRegToCVReg(LowCVRegs[I], I + CVLowRegStart);
+
+ MRI->mapLLVMRegToCVReg(X86::EFLAGS, 34);
+
+ // The x87 registers start at 128 and are numbered sequentially.
+ unsigned FP0Start = 128;
+ for (unsigned I = 0; I < 8; ++I)
+ MRI->mapLLVMRegToCVReg(X86::FP0 + I, FP0Start + I);
+
+ // The low 8 XMM registers start at 154 and are numbered sequentially.
+ unsigned CVXMM0Start = 154;
+ for (unsigned I = 0; I < 8; ++I)
+ MRI->mapLLVMRegToCVReg(X86::XMM0 + I, CVXMM0Start + I);
+
+ // The high 8 XMM registers start at 252 and are numbered sequentially.
+ unsigned CVXMM8Start = 252;
+ for (unsigned I = 0; I < 8; ++I)
+ MRI->mapLLVMRegToCVReg(X86::XMM8 + I, CVXMM8Start + I);
+
+ // FIXME: XMM16 and above from AVX512 not yet documented.
+
+ // AMD64 registers start at 324 and count up.
+ unsigned CVX64RegStart = 324;
+ static const MCPhysReg CVX64Regs[] = {
+ X86::SIL, X86::DIL, X86::BPL, X86::SPL, X86::RAX, X86::RBX,
+ X86::RCX, X86::RDX, X86::RSI, X86::RDI, X86::RBP, X86::RSP,
+ X86::R8, X86::R9, X86::R10, X86::R11, X86::R12, X86::R13,
+ X86::R14, X86::R15, X86::R8B, X86::R9B, X86::R10B, X86::R11B,
+ X86::R12B, X86::R13B, X86::R14B, X86::R15B, X86::R8W, X86::R9W,
+ X86::R10W, X86::R11W, X86::R12W, X86::R13W, X86::R14W, X86::R15W,
+ X86::R8D, X86::R9D, X86::R10D, X86::R11D, X86::R12D, X86::R13D,
+ X86::R14D, X86::R15D, X86::YMM0, X86::YMM1, X86::YMM2, X86::YMM3,
+ X86::YMM4, X86::YMM5, X86::YMM6, X86::YMM7, X86::YMM8, X86::YMM9,
+ X86::YMM10, X86::YMM11, X86::YMM12, X86::YMM13, X86::YMM14, X86::YMM15,
+ };
+ for (unsigned I = 0; I < array_lengthof(CVX64Regs); ++I)
+ MRI->mapLLVMRegToCVReg(CVX64Regs[I], CVX64RegStart + I);
}
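Since each CodeView run is contiguous, a register's CodeView number is always a run base plus an index. A standalone illustration using the bases from the comments above (the register identities are stand-ins, not the LLVM enums):

    #include <cassert>
    int main() {
      unsigned CVXMM0Start = 154, CVXMM8Start = 252;
      auto cvForXMM = [&](unsigned N) { // N = XMM register index
        return N < 8 ? CVXMM0Start + N : CVXMM8Start + (N - 8);
      };
      assert(cvForXMM(3) == 157); // XMM3
      assert(cvForXMM(9) == 253); // XMM9
    }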
MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
@@ -105,7 +151,7 @@ static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
MCRegisterInfo *X = new MCRegisterInfo();
InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
X86_MC::getDwarfRegFlavour(TT, true), RA);
- X86_MC::InitLLVM2SEHRegisterMapping(X);
+ X86_MC::initLLVMToSEHAndCVRegMapping(X);
return X;
}
@@ -152,53 +198,16 @@ static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
return MAI;
}
-static MCCodeGenInfo *createX86MCCodeGenInfo(const Triple &TT, Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
-
+static void adjustCodeGenOpts(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model &CM) {
bool is64Bit = TT.getArch() == Triple::x86_64;
- if (RM == Reloc::Default) {
- // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode.
- // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we
- // use static relocation model by default.
- if (TT.isOSDarwin()) {
- if (is64Bit)
- RM = Reloc::PIC_;
- else
- RM = Reloc::DynamicNoPIC;
- } else if (TT.isOSWindows() && is64Bit)
- RM = Reloc::PIC_;
- else
- RM = Reloc::Static;
- }
-
- // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC
- // is defined as a model for code which may be used in static or dynamic
- // executables but not necessarily a shared library. On X86-32 we just
- // compile in -static mode, in x86-64 we use PIC.
- if (RM == Reloc::DynamicNoPIC) {
- if (is64Bit)
- RM = Reloc::PIC_;
- else if (!TT.isOSDarwin())
- RM = Reloc::Static;
- }
-
- // If we are on Darwin, disallow static relocation model in X86-64 mode, since
- // the Mach-O file format doesn't support it.
- if (RM == Reloc::Static && TT.isOSDarwin() && is64Bit)
- RM = Reloc::PIC_;
-
// For static codegen, if we're not already set, use Small codegen.
if (CM == CodeModel::Default)
CM = CodeModel::Small;
else if (CM == CodeModel::JITDefault)
// 64-bit JIT places everything in the same buffer except external funcs.
CM = is64Bit ? CodeModel::Large : CodeModel::Small;
-
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
}
static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
@@ -215,10 +224,6 @@ static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
MCContext &Ctx) {
- if (TheTriple.isOSBinFormatMachO() && TheTriple.getArch() == Triple::x86_64)
- return createX86_64MachORelocationInfo(Ctx);
- else if (TheTriple.isOSBinFormatELF())
- return createX86_64ELFRelocationInfo(Ctx);
// Default to the stock relocation info.
return llvm::createMCRelocationInfo(TheTriple, Ctx);
}
@@ -234,7 +239,7 @@ extern "C" void LLVMInitializeX86TargetMC() {
RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);
// Register the MC codegen info.
- RegisterMCCodeGenInfoFn Y(*T, createX86MCCodeGenInfo);
+ RegisterMCAdjustCodeGenOptsFn Y(*T, adjustCodeGenOpts);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index 2d2836ff07c55..ca4f0d3e17d5d 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -14,6 +14,7 @@
#ifndef LLVM_LIB_TARGET_X86_MCTARGETDESC_X86MCTARGETDESC_H
#define LLVM_LIB_TARGET_X86_MCTARGETDESC_X86MCTARGETDESC_H
+#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/DataTypes.h"
#include <string>
@@ -26,7 +27,6 @@ class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
class MCRelocationInfo;
-class MCStreamer;
class Target;
class Triple;
class StringRef;
@@ -56,7 +56,7 @@ std::string ParseX86Triple(const Triple &TT);
unsigned getDwarfRegFlavour(const Triple &TT, bool isEH);
-void InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI);
+void initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI);
/// Create an X86 MCSubtargetInfo instance. This is exposed so Asm parser, etc.
/// do not need to go through TargetRegistry.
@@ -93,12 +93,6 @@ MCObjectWriter *createX86ELFObjectWriter(raw_pwrite_stream &OS, bool IsELF64,
MCObjectWriter *createX86WinCOFFObjectWriter(raw_pwrite_stream &OS,
bool Is64Bit);
-/// Construct X86-64 Mach-O relocation info.
-MCRelocationInfo *createX86_64MachORelocationInfo(MCContext &Ctx);
-
-/// Construct X86-64 ELF relocation info.
-MCRelocationInfo *createX86_64ELFRelocationInfo(MCContext &Ctx);
-
/// Returns the sub or super register of a specific X86 register.
/// e.g. getX86SubSuperRegister(X86::EAX, 16) returns X86::AX.
/// Aborts on error.
diff --git a/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
deleted file mode 100644
index 9bfe999424fa8..0000000000000
--- a/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-//===-- X86MachORelocationInfo.cpp ----------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/X86MCTargetDesc.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCRelocationInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Object/MachO.h"
-
-using namespace llvm;
-using namespace object;
-using namespace MachO;
-
-namespace {
-class X86_64MachORelocationInfo : public MCRelocationInfo {
-public:
- X86_64MachORelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
-
- const MCExpr *createExprForRelocation(RelocationRef Rel) override {
- const MachOObjectFile *Obj = cast<MachOObjectFile>(Rel.getObject());
-
- uint64_t RelType = Rel.getType();
- symbol_iterator SymI = Rel.getSymbol();
-
- ErrorOr<StringRef> SymNameOrErr = SymI->getName();
- if (std::error_code EC = SymNameOrErr.getError())
- report_fatal_error(EC.message());
- StringRef SymName = *SymNameOrErr;
- uint64_t SymAddr = SymI->getValue();
-
- any_relocation_info RE = Obj->getRelocation(Rel.getRawDataRefImpl());
- bool isPCRel = Obj->getAnyRelocationPCRel(RE);
-
- MCSymbol *Sym = Ctx.getOrCreateSymbol(SymName);
- // FIXME: check that the value is actually the same.
- if (!Sym->isVariable())
- Sym->setVariableValue(MCConstantExpr::create(SymAddr, Ctx));
- const MCExpr *Expr = nullptr;
-
- switch(RelType) {
- case X86_64_RELOC_TLV:
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
- break;
- case X86_64_RELOC_SIGNED_4:
- Expr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(Sym, Ctx),
- MCConstantExpr::create(4, Ctx),
- Ctx);
- break;
- case X86_64_RELOC_SIGNED_2:
- Expr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(Sym, Ctx),
- MCConstantExpr::create(2, Ctx),
- Ctx);
- break;
- case X86_64_RELOC_SIGNED_1:
- Expr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(Sym, Ctx),
- MCConstantExpr::create(1, Ctx),
- Ctx);
- break;
- case X86_64_RELOC_GOT_LOAD:
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
- break;
- case X86_64_RELOC_GOT:
- Expr = MCSymbolRefExpr::create(Sym, isPCRel ?
- MCSymbolRefExpr::VK_GOTPCREL :
- MCSymbolRefExpr::VK_GOT,
- Ctx);
- break;
- case X86_64_RELOC_SUBTRACTOR:
- {
- Rel.moveNext();
- any_relocation_info RENext =
- Obj->getRelocation(Rel.getRawDataRefImpl());
-
- // X86_64_SUBTRACTOR must be followed by a relocation of type
- // X86_64_RELOC_UNSIGNED.
- // NOTE: Scattered relocations don't exist on x86_64.
- unsigned RType = Obj->getAnyRelocationType(RENext);
- if (RType != X86_64_RELOC_UNSIGNED)
- report_fatal_error("Expected X86_64_RELOC_UNSIGNED after "
- "X86_64_RELOC_SUBTRACTOR.");
-
- const MCExpr *LHS = MCSymbolRefExpr::create(Sym, Ctx);
-
- symbol_iterator RSymI = Rel.getSymbol();
- uint64_t RSymAddr = RSymI->getValue();
- ErrorOr<StringRef> RSymName = RSymI->getName();
- if (std::error_code EC = RSymName.getError())
- report_fatal_error(EC.message());
-
- MCSymbol *RSym = Ctx.getOrCreateSymbol(*RSymName);
- if (!RSym->isVariable())
- RSym->setVariableValue(MCConstantExpr::create(RSymAddr, Ctx));
-
- const MCExpr *RHS = MCSymbolRefExpr::create(RSym, Ctx);
-
- Expr = MCBinaryExpr::createSub(LHS, RHS, Ctx);
- break;
- }
- default:
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- }
- return Expr;
- }
-};
-} // End unnamed namespace
-
-/// createX86_64MachORelocationInfo - Construct an X86-64 Mach-O RelocationInfo.
-MCRelocationInfo *llvm::createX86_64MachORelocationInfo(MCContext &Ctx) {
- return new X86_64MachORelocationInfo(Ctx);
-}
diff --git a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index 191ebeac7265e..297926ddcfdaf 100644
--- a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -73,7 +73,9 @@ public:
static bool isFixupKindRIPRel(unsigned Kind) {
return Kind == X86::reloc_riprel_4byte ||
- Kind == X86::reloc_riprel_4byte_movq_load;
+ Kind == X86::reloc_riprel_4byte_movq_load ||
+ Kind == X86::reloc_riprel_4byte_relax ||
+ Kind == X86::reloc_riprel_4byte_relax_rex;
}
static unsigned getFixupKindLog2Size(unsigned Kind) {
@@ -87,8 +89,11 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
case FK_PCRel_4:
// FIXME: Remove these!!!
case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
case FK_Data_4: return 2;
case FK_Data_8: return 3;
}
diff --git a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
index bd1bc9943b6d0..33376b6d1b906 100644
--- a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -53,11 +53,16 @@ unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
return COFF::IMAGE_REL_AMD64_REL32;
case FK_Data_4:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
return COFF::IMAGE_REL_AMD64_ADDR32NB;
+ if (Modifier == MCSymbolRefExpr::VK_SECREL)
+ return COFF::IMAGE_REL_AMD64_SECREL;
return COFF::IMAGE_REL_AMD64_ADDR32;
case FK_Data_8:
return COFF::IMAGE_REL_AMD64_ADDR64;
@@ -76,8 +81,11 @@ unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
return COFF::IMAGE_REL_I386_REL32;
case FK_Data_4:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
return COFF::IMAGE_REL_I386_DIR32NB;
+ if (Modifier == MCSymbolRefExpr::VK_SECREL)
+ return COFF::IMAGE_REL_I386_SECREL;
return COFF::IMAGE_REL_I386_DIR32;
case FK_SecRel_2:
return COFF::IMAGE_REL_I386_SECTION;
diff --git a/lib/Target/X86/Makefile b/lib/Target/X86/Makefile
deleted file mode 100644
index e518fecf044fd..0000000000000
--- a/lib/Target/X86/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- lib/Target/X86/Makefile -----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMX86CodeGen
-TARGET = X86
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = X86GenRegisterInfo.inc X86GenInstrInfo.inc \
- X86GenAsmWriter.inc X86GenAsmMatcher.inc \
- X86GenAsmWriter1.inc X86GenDAGISel.inc \
- X86GenDisassemblerTables.inc X86GenFastISel.inc \
- X86GenCallingConv.inc X86GenSubtargetInfo.inc
-
-DIRS = InstPrinter AsmParser Disassembler TargetInfo MCTargetDesc Utils
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/README-X86-64.txt b/lib/Target/X86/README-X86-64.txt
index bcfdf0bc56b28..09626e13849d6 100644
--- a/lib/Target/X86/README-X86-64.txt
+++ b/lib/Target/X86/README-X86-64.txt
@@ -170,7 +170,7 @@ generated for it. The primary issue with the result is that it doesn't do any
of the optimizations which are possible if we know the address of a va_list
in the current function is never taken:
1. We shouldn't spill the XMM registers because we only call va_arg with "int".
-2. It would be nice if we could scalarrepl the va_list.
+2. It would be nice if we could SROA the va_list.
3. Probably overkill, but it'd be cool if we could peel off the first five
iterations of the loop.
diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt
index 19a1832017556..799157c926e65 100644
--- a/lib/Target/X86/README.txt
+++ b/lib/Target/X86/README.txt
@@ -50,8 +50,8 @@ Some isel ideas:
2. Code duplication (addressing mode) during isel.
3. Other ideas from "Register-Sensitive Selection, Duplication, and
Sequencing of Instructions".
-4. Scheduling for reduced register pressure. E.g. "Minimum Register
- Instruction Sequence Problem: Revisiting Optimal Code Generation for DAGs"
+4. Scheduling for reduced register pressure. E.g. "Minimum Register
+ Instruction Sequence Problem: Revisiting Optimal Code Generation for DAGs"
and other related papers.
http://citeseer.ist.psu.edu/govindarajan01minimum.html
@@ -73,7 +73,7 @@ It appears icc use push for parameter passing. Need to investigate.
//===---------------------------------------------------------------------===//
The instruction selector sometimes misses folding a load into a compare. The
-pattern is written as (cmp reg, (load p)). Because the compare isn't
+pattern is written as (cmp reg, (load p)). Because the compare isn't
commutative, it is not matched with the load on both sides. The dag combiner
should be made smart enough to canonicalize the load into the RHS of a compare
when it can invert the result of the compare for free.
diff --git a/lib/Target/X86/TargetInfo/Makefile b/lib/Target/X86/TargetInfo/Makefile
deleted file mode 100644
index ee91982df0c86..0000000000000
--- a/lib/Target/X86/TargetInfo/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/X86/TargetInfo/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86Info
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/Utils/Makefile b/lib/Target/X86/Utils/Makefile
deleted file mode 100644
index 1df6f0f561d45..0000000000000
--- a/lib/Target/X86/Utils/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/X86/Utils/Makefile -----------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86Utils
-
-# Hack: we need to include 'main' x86 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index 619f7c8d25df9..18f71675437bf 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "X86ShuffleDecode.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineValueType.h"
//===----------------------------------------------------------------------===//
@@ -44,6 +45,17 @@ void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
if (ZMask & 8) ShuffleMask[3] = SM_SentinelZero;
}
+void DecodeInsertElementMask(MVT VT, unsigned Idx, unsigned Len,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+ assert((Idx + Len) <= NumElts && "Insertion out of range");
+
+ for (unsigned i = 0; i != NumElts; ++i)
+ ShuffleMask.push_back(i);
+ for (unsigned i = 0; i != Len; ++i)
+ ShuffleMask[Idx + i] = NumElts + i;
+}
+
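A quick illustration of the mask this builds: for a 4-element vector with Idx = 1 and Len = 2, the identity mask <0,1,2,3> becomes <0,4,5,3>, where entries of NumElts or more name elements of the second source. A minimal standalone sketch of the same two loops (the helper name and plain std::vector are illustrative, not part of the patch):

#include <cassert>
#include <cstdio>
#include <vector>

// Sketch: identity mask first, then rewrite Len slots starting at Idx to
// point into a second source (indices NumElts .. NumElts + Len - 1).
std::vector<int> insertElementMask(unsigned NumElts, unsigned Idx, unsigned Len) {
  assert(Idx + Len <= NumElts && "Insertion out of range");
  std::vector<int> Mask;
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(i);
  for (unsigned i = 0; i != Len; ++i)
    Mask[Idx + i] = NumElts + i;
  return Mask;
}

int main() {
  for (int M : insertElementMask(4, 1, 2))
    std::printf("%d ", M); // 0 4 5 3
}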
// <3,1> or <6,7,2,3>
void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
for (unsigned i = NElts / 2; i != NElts; ++i)
@@ -263,6 +275,25 @@ void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
}
}
+/// Decodes a broadcast of the first element of a vector.
+void DecodeVectorBroadcast(MVT DstVT, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = DstVT.getVectorNumElements();
+ ShuffleMask.append(NumElts, 0);
+}
+
+/// Decodes a broadcast of a subvector to a larger vector type.
+void DecodeSubVectorBroadcast(MVT DstVT, MVT SrcVT,
+ SmallVectorImpl<int> &ShuffleMask) {
+ assert(SrcVT.getScalarType() == DstVT.getScalarType() &&
+ "Non matching vector element types");
+ unsigned NumElts = SrcVT.getVectorNumElements();
+ unsigned Scale = DstVT.getSizeInBits() / SrcVT.getSizeInBits();
+
+ for (unsigned i = 0; i != Scale; ++i)
+ for (unsigned j = 0; j != NumElts; ++j)
+ ShuffleMask.push_back(j);
+}
+
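The nested loops simply tile the subvector's element indices Scale times; broadcasting a v4f32 subvector into a v8f32 result (Scale = 256 / 128 = 2) yields <0,1,2,3,0,1,2,3>. A minimal sketch in plain element counts, just to show the shape:

#include <cstdio>
#include <vector>

// Sketch: repeat the source indices Scale times.
std::vector<int> subVectorBroadcastMask(unsigned SrcElts, unsigned Scale) {
  std::vector<int> Mask;
  for (unsigned i = 0; i != Scale; ++i)
    for (unsigned j = 0; j != SrcElts; ++j)
      Mask.push_back(j);
  return Mask;
}

int main() {
  for (int M : subVectorBroadcastMask(4, 2))
    std::printf("%d ", M); // 0 1 2 3 0 1 2 3
}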
/// \brief Decode a shuffle of packed values at 128-bit granularity
/// (SHUFF32x4/SHUFF64x2/SHUFI32x4/SHUFI64x2)
/// immediate mask into a shuffle mask.
@@ -303,9 +334,9 @@ void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
ShuffleMask.push_back(M);
continue;
}
- // For AVX vectors with 32 bytes the base of the shuffle is the half of
- // the vector we're inside.
- int Base = i < 16 ? 0 : 16;
+ // For 256/512-bit vectors the base of the shuffle is the 128-bit
+ // subvector we're inside.
+ int Base = (i / 16) * 16;
// If the high bit (7) of the byte is set, the element is zeroed.
if (M & (1 << 7))
ShuffleMask.push_back(SM_SentinelZero);
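The new Base computation generalizes the old two-halves special case to any number of 16-byte lanes. A small arithmetic check, assuming the rest of the loop (outside this hunk) pushes Base plus the low nibble of the index byte, as the pre-existing code for smaller vectors does:

#include <cstdio>

int main() {
  // Byte i = 37 of a 512-bit (64-byte) PSHUFB mask lives in lane 2, so
  // Base = (37 / 16) * 16 = 32; index byte M = 0x05 then selects element
  // 32 + (0x05 & 0xf) = 37, i.e. lane-local byte 5.
  int i = 37, M = 0x05;
  int Base = (i / 16) * 16;
  std::printf("%d\n", Base + (M & 0xf)); // 37
}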
@@ -331,23 +362,62 @@ void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
}
}
-/// DecodeVPERMMask - this decodes the shuffle masks for VPERMQ/VPERMPD.
-/// No VT provided since it only works on 256-bit, 4 element vectors.
-void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- for (unsigned i = 0; i != 4; ++i) {
- ShuffleMask.push_back((Imm >> (2 * i)) & 3);
+void DecodeVPPERMMask(ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask) {
+ assert(RawMask.size() == 16 && "Illegal VPPERM shuffle mask size");
+
+ // VPPERM Operation
+ // Bits[4:0] - Byte Index (0 - 31)
+ // Bits[7:5] - Permute Operation
+ //
+ // Permute Operation:
+ // 0 - Source byte (no logical operation).
+ // 1 - Invert source byte.
+ // 2 - Bit reverse of source byte.
+ // 3 - Bit reverse of inverted source byte.
+ // 4 - 00h (zero - fill).
+ // 5 - FFh (ones - fill).
+ // 6 - Most significant bit of source byte replicated in all bit positions.
+ // 7 - Invert most significant bit of source byte and replicate in all bit positions.
+ for (int i = 0, e = RawMask.size(); i < e; ++i) {
+ uint64_t M = RawMask[i];
+ if (M == (uint64_t)SM_SentinelUndef) {
+ ShuffleMask.push_back(M);
+ continue;
+ }
+
+ uint64_t PermuteOp = (M >> 5) & 0x7;
+ if (PermuteOp == 4) {
+ ShuffleMask.push_back(SM_SentinelZero);
+ continue;
+ }
+ if (PermuteOp != 0) {
+ ShuffleMask.clear();
+ return;
+ }
+
+ uint64_t Index = M & 0x1F;
+ ShuffleMask.push_back((int)Index);
}
}
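A standalone sketch of the per-byte rules above, showing how individual selector bytes decode; this mirrors the observable behavior rather than quoting the patch:

#include <cstdint>
#include <cstdio>

// Sketch: returns the mask entry for one VPPERM selector byte, -2 for
// zero-fill (SM_SentinelZero), and flags the logical-op encodings that
// this decoder deliberately refuses to handle.
int decodeVPPERMByte(uint64_t M, bool &Unsupported) {
  uint64_t PermuteOp = (M >> 5) & 0x7;
  if (PermuteOp == 4)      // 00h fill
    return -2;
  if (PermuteOp != 0) {    // invert / bit-reverse / ones / msb variants
    Unsupported = true;
    return 0;
  }
  return int(M & 0x1F);    // plain source-byte select, 0 - 31
}

int main() {
  bool U = false;
  std::printf("%d\n", decodeVPPERMByte(0x12, U)); // 18: source byte 18
  std::printf("%d\n", decodeVPPERMByte(0x9F, U)); // -2: zero fill
  decodeVPPERMByte(0x35, U);                      // op 1 (invert source byte)
  std::printf("%d\n", int(U));                    // 1: mask would be cleared
}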
-void DecodeZeroExtendMask(MVT SrcVT, MVT DstVT, SmallVectorImpl<int> &Mask) {
+/// Decodes the shuffle masks for VPERMQ/VPERMPD.
+void DecodeVPERMMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ assert((VT.is256BitVector() || VT.is512BitVector()) &&
+ (VT.getScalarSizeInBits() == 64) && "Unexpected vector value type");
+ unsigned NumElts = VT.getVectorNumElements();
+ for (unsigned l = 0; l != NumElts; l += 4)
+ for (unsigned i = 0; i != 4; ++i)
+ ShuffleMask.push_back(l + ((Imm >> (2 * i)) & 3));
+}
+
+void DecodeZeroExtendMask(MVT SrcScalarVT, MVT DstVT, SmallVectorImpl<int> &Mask) {
unsigned NumDstElts = DstVT.getVectorNumElements();
- unsigned SrcScalarBits = SrcVT.getScalarSizeInBits();
+ unsigned SrcScalarBits = SrcScalarVT.getSizeInBits();
unsigned DstScalarBits = DstVT.getScalarSizeInBits();
unsigned Scale = DstScalarBits / SrcScalarBits;
assert(SrcScalarBits < DstScalarBits &&
"Expected zero extension mask to increase scalar size");
- assert(SrcVT.getVectorNumElements() >= NumDstElts &&
- "Too many zero extension lanes");
for (unsigned i = 0; i != NumDstElts; i++) {
Mask.push_back(i);
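Concretely, zero-extending i8 elements to i32 gives Scale = 32 / 8 = 4. Each kept source index is then followed by Scale - 1 zero sentinels; that sentinel tail falls outside this hunk, so the sketch below treats it as an assumption:

#include <cstdio>
#include <vector>

// Sketch: zero extension as a shuffle, assuming each kept element is
// followed by Scale - 1 zero sentinels (SM_SentinelZero == -2).
std::vector<int> zeroExtendMask(unsigned NumDstElts, unsigned Scale) {
  std::vector<int> Mask;
  for (unsigned i = 0; i != NumDstElts; ++i) {
    Mask.push_back(i);
    for (unsigned j = 1; j != Scale; ++j)
      Mask.push_back(-2);
  }
  return Mask;
}

int main() {
  for (int M : zeroExtendMask(2, 4))
    std::printf("%d ", M); // 0 -2 -2 -2 1 -2 -2 -2
}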
@@ -445,18 +515,78 @@ void DecodeINSERTQIMask(int Len, int Idx,
ShuffleMask.push_back(SM_SentinelUndef);
}
+void DecodeVPERMILPMask(MVT VT, ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned VecSize = VT.getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
+ unsigned NumLanes = VecSize / 128;
+ unsigned NumEltsPerLane = VT.getVectorNumElements() / NumLanes;
+ assert((VecSize == 128 || VecSize == 256 || VecSize == 512) &&
+ "Unexpected vector size");
+ assert((EltSize == 32 || EltSize == 64) && "Unexpected element size");
+
+ for (unsigned i = 0, e = RawMask.size(); i < e; ++i) {
+ uint64_t M = RawMask[i];
+ M = (EltSize == 64 ? ((M >> 1) & 0x1) : (M & 0x3));
+ unsigned LaneOffset = i & ~(NumEltsPerLane - 1);
+ ShuffleMask.push_back((int)(LaneOffset + M));
+ }
+}
+
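A worked example of the per-lane arithmetic: in a v8f32 VPERMILPS (two 128-bit lanes of four elements), element i = 5 with a raw selector of 3 resolves to entry 4 + 3 = 7, so selection never crosses its own 128-bit lane:

#include <cstdio>

int main() {
  unsigned NumEltsPerLane = 4, i = 5;
  unsigned M = 3 & 0x3;                        // 32-bit elements use two bits
  unsigned LaneOffset = i & ~(NumEltsPerLane - 1);
  std::printf("%u\n", LaneOffset + M);         // 7: element 3 of lane 1
}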
+void DecodeVPERMIL2PMask(MVT VT, unsigned M2Z, ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned VecSize = VT.getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
+ unsigned NumLanes = VecSize / 128;
+ unsigned NumEltsPerLane = VT.getVectorNumElements() / NumLanes;
+ assert((VecSize == 128 || VecSize == 256) &&
+ "Unexpected vector size");
+ assert((EltSize == 32 || EltSize == 64) && "Unexpected element size");
+
+ for (unsigned i = 0, e = RawMask.size(); i < e; ++i) {
+ // VPERMIL2 Operation.
+ // Bits[3] - Match Bit.
+ // Bits[2:1] - (Per Lane) PD Shuffle Mask.
+ // Bits[2:0] - (Per Lane) PS Shuffle Mask.
+ uint64_t Selector = RawMask[i];
+ unsigned MatchBit = (Selector >> 3) & 0x1;
+
+ // M2Z[0:1] MatchBit
+ // 0Xb X Source selected by Selector index.
+ // 10b 0 Source selected by Selector index.
+ // 10b 1 Zero.
+ // 11b 0 Zero.
+ // 11b 1 Source selected by Selector index.
+ if ((M2Z & 0x2) != 0 && MatchBit != (M2Z & 0x1)) {
+ ShuffleMask.push_back(SM_SentinelZero);
+ continue;
+ }
+
+ unsigned Index = i & ~(NumEltsPerLane - 1);
+ if (EltSize == 64)
+ Index += (Selector >> 1) & 0x1;
+ else
+ Index += Selector & 0x3;
+
+ unsigned SrcOffset = (Selector >> 2) & 1;
+ ShuffleMask.push_back((int)(SrcOffset + Index));
+ }
+}
+
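The zeroing rule in the table boils down to a single comparison. A tiny sketch exercising the four rows (selector 0x8 has MatchBit = 1, selector 0x0 has MatchBit = 0):

#include <cstdint>
#include <cstdio>

// Sketch of the M2Z/MatchBit rule: zero only when M2Z bit 1 is set and
// MatchBit differs from M2Z bit 0.
bool isZeroed(unsigned M2Z, uint64_t Selector) {
  unsigned MatchBit = (Selector >> 3) & 0x1;
  return (M2Z & 0x2) != 0 && MatchBit != (M2Z & 0x1);
}

int main() {
  std::printf("%d\n", isZeroed(0x0, 0x8)); // 0: 0Xb never zeroes
  std::printf("%d\n", isZeroed(0x2, 0x8)); // 1: 10b with MatchBit = 1
  std::printf("%d\n", isZeroed(0x3, 0x8)); // 0: 11b with MatchBit = 1 selects
  std::printf("%d\n", isZeroed(0x3, 0x0)); // 1: 11b with MatchBit = 0 zeroes
}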
void DecodeVPERMVMask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask) {
- for (int i = 0, e = RawMask.size(); i < e; ++i) {
- uint64_t M = RawMask[i];
+ uint64_t EltMaskSize = RawMask.size() - 1;
+ for (auto M : RawMask) {
+ M &= EltMaskSize;
ShuffleMask.push_back((int)M);
}
}
void DecodeVPERMV3Mask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask) {
- for (int i = 0, e = RawMask.size(); i < e; ++i) {
- uint64_t M = RawMask[i];
+ uint64_t EltMaskSize = (RawMask.size() * 2) - 1;
+ for (auto M : RawMask) {
+ M &= EltMaskSize;
ShuffleMask.push_back((int)M);
}
}
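Both decoders now wrap out-of-range raw indices instead of passing them through. Element counts are powers of two, so masking with size - 1 is a cheap modulo; VPERMV3 concatenates two sources, hence the doubled range:

#include <cstdint>
#include <cstdio>

int main() {
  // With 8 elements per source, VPERMV wraps a raw index of 11 to
  // 11 & 7 = 3; VPERMV3 keeps 11 & 15 = 11, i.e. element 3 of source 2.
  uint64_t Raw = 11;
  std::printf("%u\n", unsigned(Raw & (8 - 1)));     // 3
  std::printf("%u\n", unsigned(Raw & (2 * 8 - 1))); // 11
}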
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.h b/lib/Target/X86/Utils/X86ShuffleDecode.h
index 72db6a81912b6..dc21c19752c35 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -16,23 +16,31 @@
#define LLVM_LIB_TARGET_X86_UTILS_X86SHUFFLEDECODE_H
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/ArrayRef.h"
//===----------------------------------------------------------------------===//
// Vector Mask Decoding
//===----------------------------------------------------------------------===//
namespace llvm {
+template <typename T> class ArrayRef;
class MVT;
enum { SM_SentinelUndef = -1, SM_SentinelZero = -2 };
+/// Decode a 128-bit INSERTPS instruction as a v4f32 shuffle mask.
void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-// <3,1> or <6,7,2,3>
+/// Insert the bottom Len elements from a second source into a vector starting
+/// at element Idx.
+void DecodeInsertElementMask(MVT VT, unsigned Idx, unsigned Len,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a MOVHLPS instruction as a v2f64/v4f32 shuffle mask.
+/// i.e. <3,1> or <6,7,2,3>
void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
-// <0,2> or <0,1,4,5>
+/// Decode a MOVLHPS instruction as a v2f64/v4f32 shuffle mask.
+/// i.e. <0,2> or <0,1,4,5>
void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
void DecodeMOVSLDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
@@ -47,74 +55,104 @@ void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
void DecodePALIGNRMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+/// Decodes the shuffle masks for pshufd/pshufw/vpermilpd/vpermilps.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+/// Decodes the shuffle masks for pshufhw.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodePSHUFHWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-void DecodePSHUFLWMask(MVT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+/// Decodes the shuffle masks for pshuflw.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
+void DecodePSHUFLWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decodes a PSWAPD 3DNow! instruction.
+/// Decodes a PSWAPD 3DNow! instruction.
void DecodePSWAPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
-/// the type of the vector allowing it to handle different datatypes and vector
-/// widths.
+/// Decodes the shuffle masks for shufp*.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
-/// and punpckh*. VT indicates the type of the vector allowing it to handle
-/// different datatypes and vector widths.
+/// Decodes the shuffle masks for unpckhps/unpckhpd and punpckh*.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
-/// and punpckl*. VT indicates the type of the vector allowing it to handle
-/// different datatypes and vector widths.
+/// Decodes the shuffle masks for unpcklps/unpcklpd and punpckl*.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a PSHUFB mask from a raw array of constants such as from
+/// Decodes a broadcast of the first element of a vector.
+void DecodeVectorBroadcast(MVT DstVT, SmallVectorImpl<int> &ShuffleMask);
+
+/// Decodes a broadcast of a subvector to a larger vector type.
+void DecodeSubVectorBroadcast(MVT DstVT, MVT SrcVT,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a PSHUFB mask from a raw array of constants such as from
/// BUILD_VECTOR.
void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a BLEND immediate mask into a shuffle mask.
+/// Decode a BLEND immediate mask into a shuffle mask.
void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
void DecodeVPERM2X128Mask(MVT VT, unsigned Imm,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a shuffle packed values at 128-bit granularity
+/// Decode a shuffle of packed values at 128-bit granularity
/// immediate mask into a shuffle mask.
void decodeVSHUF64x2FamilyMask(MVT VT, unsigned Imm,
SmallVectorImpl<int> &ShuffleMask);
-/// DecodeVPERMMask - this decodes the shuffle masks for VPERMQ/VPERMPD.
-/// No VT provided since it only works on 256-bit, 4 element vectors.
-void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+/// Decodes the shuffle masks for VPERMQ/VPERMPD.
+void DecodeVPERMMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a zero extension instruction as a shuffle mask.
-void DecodeZeroExtendMask(MVT SrcVT, MVT DstVT,
+/// Decode a VPPERM mask from a raw array of constants such as from
+/// BUILD_VECTOR.
+/// This can only decode basic masks (permutes + zeros), not any of the other
+/// operations that VPPERM can perform.
+void DecodeVPPERMMask(ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a zero extension instruction as a shuffle mask.
+void DecodeZeroExtendMask(MVT SrcScalarVT, MVT DstVT,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a move lower and zero upper instruction as a shuffle mask.
+/// Decode a move lower and zero upper instruction as a shuffle mask.
void DecodeZeroMoveLowMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a scalar float move instruction as a shuffle mask.
+/// Decode a scalar float move instruction as a shuffle mask.
void DecodeScalarMoveMask(MVT VT, bool IsLoad,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a SSE4A EXTRQ instruction as a v16i8 shuffle mask.
+/// Decode a SSE4A EXTRQ instruction as a v16i8 shuffle mask.
void DecodeEXTRQIMask(int Len, int Idx,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a SSE4A INSERTQ instruction as a v16i8 shuffle mask.
+/// Decode a SSE4A INSERTQ instruction as a v16i8 shuffle mask.
void DecodeINSERTQIMask(int Len, int Idx,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERM W/D/Q/PS/PD mask from a raw array of constants.
+/// Decode a VPERMILPD/VPERMILPS variable mask from a raw array of constants.
+void DecodeVPERMILPMask(MVT VT, ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a VPERMIL2PD/VPERMIL2PS variable mask from a raw array of constants.
+void DecodeVPERMIL2PMask(MVT VT, unsigned M2Z, ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a VPERM W/D/Q/PS/PD mask from a raw array of constants.
void DecodeVPERMVMask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERMT2 W/D/Q/PS/PD mask from a raw array of constants.
+/// Decode a VPERMT2 W/D/Q/PS/PD mask from a raw array of constants.
void DecodeVPERMV3Mask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask);
} // llvm namespace
diff --git a/lib/Target/X86/X86.h b/lib/Target/X86/X86.h
index 01e65b89f480c..23d6c7120a4b5 100644
--- a/lib/Target/X86/X86.h
+++ b/lib/Target/X86/X86.h
@@ -21,6 +21,7 @@ namespace llvm {
class FunctionPass;
class ImmutablePass;
+class PassRegistry;
class X86TargetMachine;
/// This pass converts a legalized DAG into a X86-specific DAG, ready for
@@ -58,6 +59,12 @@ FunctionPass *createX86FixupLEAs();
/// recalculations.
FunctionPass *createX86OptimizeLEAs();
+/// Return a pass that transforms setcc + movzx pairs into xor + setcc.
+FunctionPass *createX86FixupSetCC();
+
+/// Return a pass that expands WinAlloca pseudo-instructions.
+FunctionPass *createX86WinAllocaExpander();
+
/// Return a pass that optimizes the code-size of x86 call sequences. This is
/// done by replacing esp-relative movs with pushes.
FunctionPass *createX86CallFrameOptimization();
@@ -72,6 +79,14 @@ FunctionPass *createX86WinEHStatePass();
/// must run after prologue/epilogue insertion and before lowering
/// the MachineInstr to MC.
FunctionPass *createX86ExpandPseudoPass();
+
+/// Return a Machine IR pass that selectively replaces
+/// certain byte and word instructions with equivalent 32-bit instructions,
+/// in order to eliminate partial-register usage and false dependences on
+/// the upper portions of registers, and to save code size.
+FunctionPass *createX86FixupBWInsts();
+
+void initializeFixupBWInstPassPass(PassRegistry &);
} // End llvm namespace
#endif
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index 8902a8534256d..8267a84518fc6 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -31,6 +31,9 @@ def Mode16Bit : SubtargetFeature<"16bit-mode", "In16BitMode", "true",
// X86 Subtarget features
//===----------------------------------------------------------------------===//
+def FeatureX87 : SubtargetFeature<"x87","HasX87", "true",
+ "Enable X87 float instructions">;
+
def FeatureCMOV : SubtargetFeature<"cmov","HasCMov", "true",
"Enable conditional move instructions">;
@@ -125,6 +128,9 @@ def FeatureCDI : SubtargetFeature<"avx512cd", "HasCDI", "true",
def FeaturePFI : SubtargetFeature<"avx512pf", "HasPFI", "true",
"Enable AVX-512 PreFetch Instructions",
[FeatureAVX512]>;
+def FeaturePREFETCHWT1 : SubtargetFeature<"prefetchwt1", "HasPFPREFETCHWT1",
+ "true",
+ "Prefetch with Intent to Write and T1 Hint">;
def FeatureDQI : SubtargetFeature<"avx512dq", "HasDQI", "true",
"Enable AVX-512 Doubleword and Quadword Instructions",
[FeatureAVX512]>;
@@ -134,6 +140,12 @@ def FeatureBWI : SubtargetFeature<"avx512bw", "HasBWI", "true",
def FeatureVLX : SubtargetFeature<"avx512vl", "HasVLX", "true",
"Enable AVX-512 Vector Length eXtensions",
[FeatureAVX512]>;
+def FeatureVBMI : SubtargetFeature<"avx512vbmi", "HasVBMI", "true",
+ "Enable AVX-512 Vector Bit Manipulation Instructions",
+ [FeatureAVX512]>;
+def FeatureIFMA : SubtargetFeature<"avx512ifma", "HasIFMA", "true",
+ "Enable AVX-512 Integer Fused Multiple-Add",
+ [FeatureAVX512]>;
def FeaturePKU : SubtargetFeature<"pku", "HasPKU", "true",
"Enable protection keys">;
def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
@@ -186,6 +198,8 @@ def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true",
"Support RDSEED instruction">;
def FeatureLAHFSAHF : SubtargetFeature<"sahf", "HasLAHFSAHF", "true",
"Support LAHF and SAHF instructions">;
+def FeatureMWAITX : SubtargetFeature<"mwaitx", "HasMWAITX", "true",
+ "Enable MONITORX/MWAITX timer functionality">;
def FeatureMPX : SubtargetFeature<"mpx", "HasMPX", "true",
"Support MPX instructions">;
def FeatureLEAForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
@@ -199,6 +213,20 @@ def FeatureSlowDivide64 : SubtargetFeature<"idivq-to-divw",
def FeaturePadShortFunctions : SubtargetFeature<"pad-short-functions",
"PadShortFunctions", "true",
"Pad short functions">;
+def FeatureINVPCID : SubtargetFeature<"invpcid", "HasInvPCId", "true",
+ "Invalidate Process-Context Identifier">;
+def FeatureVMFUNC : SubtargetFeature<"vmfunc", "HasVMFUNC", "true",
+ "VM Functions">;
+def FeatureSMAP : SubtargetFeature<"smap", "HasSMAP", "true",
+ "Supervisor Mode Access Protection">;
+def FeatureSGX : SubtargetFeature<"sgx", "HasSGX", "true",
+ "Enable Software Guard Extensions">;
+def FeatureCLFLUSHOPT : SubtargetFeature<"clflushopt", "HasCLFLUSHOPT", "true",
+ "Flush A Cache Line Optimized">;
+def FeaturePCOMMIT : SubtargetFeature<"pcommit", "HasPCOMMIT", "true",
+ "Enable Persistent Commit">;
+def FeatureCLWB : SubtargetFeature<"clwb", "HasCLWB", "true",
+ "Cache Line Write Back">;
// TODO: This feature ought to be renamed.
// What it really refers to are CPUs for which certain instructions
// (which ones besides the example below?) are microcoded.
@@ -216,6 +244,11 @@ def FeatureSlowIncDec : SubtargetFeature<"slow-incdec", "SlowIncDec", "true",
def FeatureSoftFloat
: SubtargetFeature<"soft-float", "UseSoftFloat", "true",
"Use software floating point features.">;
+// On at least some AMD processors, there is no performance hazard to writing
+// only the lower parts of a YMM register without clearing the upper part.
+def FeatureFastPartialYMMWrite
+ : SubtargetFeature<"fast-partial-ymm-write", "HasFastPartialYMMWrite",
+ "true", "Partial writes to YMM registers are fast">;
//===----------------------------------------------------------------------===//
// X86 processors supported.
@@ -231,37 +264,57 @@ def ProcIntelSLM : SubtargetFeature<"slm", "X86ProcFamily", "IntelSLM",
class Proc<string Name, list<SubtargetFeature> Features>
: ProcessorModel<Name, GenericModel, Features>;
-def : Proc<"generic", [FeatureSlowUAMem16]>;
-def : Proc<"i386", [FeatureSlowUAMem16]>;
-def : Proc<"i486", [FeatureSlowUAMem16]>;
-def : Proc<"i586", [FeatureSlowUAMem16]>;
-def : Proc<"pentium", [FeatureSlowUAMem16]>;
-def : Proc<"pentium-mmx", [FeatureSlowUAMem16, FeatureMMX]>;
-def : Proc<"i686", [FeatureSlowUAMem16]>;
-def : Proc<"pentiumpro", [FeatureSlowUAMem16, FeatureCMOV]>;
-def : Proc<"pentium2", [FeatureSlowUAMem16, FeatureMMX, FeatureCMOV,
- FeatureFXSR]>;
-def : Proc<"pentium3", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1,
- FeatureFXSR]>;
-def : Proc<"pentium3m", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1,
- FeatureFXSR, FeatureSlowBTMem]>;
-def : Proc<"pentium-m", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE2,
- FeatureFXSR, FeatureSlowBTMem]>;
-def : Proc<"pentium4", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE2,
- FeatureFXSR]>;
-def : Proc<"pentium4m", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE2,
- FeatureFXSR, FeatureSlowBTMem]>;
+def : Proc<"generic", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"i386", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"i486", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"i586", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"pentium", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"pentium-mmx", [FeatureX87, FeatureSlowUAMem16, FeatureMMX]>;
+def : Proc<"i686", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"pentiumpro", [FeatureX87, FeatureSlowUAMem16, FeatureCMOV]>;
+def : Proc<"pentium2", [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureCMOV, FeatureFXSR]>;
+def : Proc<"pentium3", [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE1, FeatureFXSR]>;
+def : Proc<"pentium3m", [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE1, FeatureFXSR, FeatureSlowBTMem]>;
+
+// Enable the PostRAScheduler for SSE2 and SSE3 class CPUs.
+// The intent is to enable it for pentium4, which is the current default
+// processor in a vanilla 32-bit clang compilation when no specific
+// architecture is specified. This generally gives a nice performance
+// increase on silvermont, with largely neutral behavior on other
+// contemporary large-core processors.
+// pentium-m, pentium4m, prescott and nocona are included as a preventive
+// measure to avoid performance surprises, in case clang's default cpu
+// changes slightly.
+
+def : ProcessorModel<"pentium-m", GenericPostRAModel,
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE2, FeatureFXSR, FeatureSlowBTMem]>;
+
+def : ProcessorModel<"pentium4", GenericPostRAModel,
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE2, FeatureFXSR]>;
+
+def : ProcessorModel<"pentium4m", GenericPostRAModel,
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE2, FeatureFXSR, FeatureSlowBTMem]>;
+
+// Intel Quark.
+def : Proc<"lakemont", []>;
// Intel Core Duo.
def : ProcessorModel<"yonah", SandyBridgeModel,
- [FeatureSlowUAMem16, FeatureMMX, FeatureSSE3, FeatureFXSR,
- FeatureSlowBTMem]>;
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX, FeatureSSE3,
+ FeatureFXSR, FeatureSlowBTMem]>;
// NetBurst.
-def : Proc<"prescott",
- [FeatureSlowUAMem16, FeatureMMX, FeatureSSE3, FeatureFXSR,
- FeatureSlowBTMem]>;
-def : Proc<"nocona", [
+def : ProcessorModel<"prescott", GenericPostRAModel,
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX, FeatureSSE3,
+ FeatureFXSR, FeatureSlowBTMem]>;
+def : ProcessorModel<"nocona", GenericPostRAModel, [
+ FeatureX87,
FeatureSlowUAMem16,
FeatureMMX,
FeatureSSE3,
@@ -272,6 +325,7 @@ def : Proc<"nocona", [
// Intel Core 2 Solo/Duo.
def : ProcessorModel<"core2", SandyBridgeModel, [
+ FeatureX87,
FeatureSlowUAMem16,
FeatureMMX,
FeatureSSSE3,
@@ -281,6 +335,7 @@ def : ProcessorModel<"core2", SandyBridgeModel, [
FeatureLAHFSAHF
]>;
def : ProcessorModel<"penryn", SandyBridgeModel, [
+ FeatureX87,
FeatureSlowUAMem16,
FeatureMMX,
FeatureSSE41,
@@ -293,6 +348,7 @@ def : ProcessorModel<"penryn", SandyBridgeModel, [
// Atom CPUs.
class BonnellProc<string Name> : ProcessorModel<Name, AtomModel, [
ProcIntelAtom,
+ FeatureX87,
FeatureSlowUAMem16,
FeatureMMX,
FeatureSSSE3,
@@ -313,6 +369,7 @@ def : BonnellProc<"atom">; // Pin the generic name to the baseline.
class SilvermontProc<string Name> : ProcessorModel<Name, SLMModel, [
ProcIntelSLM,
+ FeatureX87,
FeatureMMX,
FeatureSSE42,
FeatureFXSR,
@@ -334,6 +391,7 @@ def : SilvermontProc<"slm">; // Legacy alias.
// "Arrandale" along with corei3 and corei5
class NehalemProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+ FeatureX87,
FeatureMMX,
FeatureSSE42,
FeatureFXSR,
@@ -348,6 +406,7 @@ def : NehalemProc<"corei7">;
// Westmere is a similar machine to nehalem with some additional features.
// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
class WestmereProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+ FeatureX87,
FeatureMMX,
FeatureSSE42,
FeatureFXSR,
@@ -360,15 +419,24 @@ class WestmereProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
]>;
def : WestmereProc<"westmere">;
+class ProcessorFeatures<list<SubtargetFeature> Inherited,
+ list<SubtargetFeature> NewFeatures> {
+ list<SubtargetFeature> Value = !listconcat(Inherited, NewFeatures);
+}
+
+class ProcModel<string Name, SchedMachineModel Model,
+ list<SubtargetFeature> ProcFeatures,
+ list<SubtargetFeature> OtherFeatures> :
+ ProcessorModel<Name, Model, !listconcat(ProcFeatures, OtherFeatures)>;
+
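The ProcessorFeatures/ProcModel pair lets each CPU generation declare only its delta on top of its predecessor, with !listconcat doing the accumulation. A rough C++ analogue of that accumulation, with abbreviated feature names purely for illustration:

#include <cstdio>
#include <string>
#include <vector>

using FeatureList = std::vector<std::string>;

// Analogue of ProcessorFeatures: inherited features plus a new delta.
FeatureList concat(const FeatureList &Inherited, const FeatureList &New) {
  FeatureList Out = Inherited;
  Out.insert(Out.end(), New.begin(), New.end());
  return Out;
}

int main() {
  FeatureList SNB = concat({}, {"avx", "aes", "xsaveopt"});
  FeatureList IVB = concat(SNB, {"rdrnd", "f16c", "fsgsbase"});
  FeatureList HSW = concat(IVB, {"avx2", "bmi", "fma"});
  for (const auto &F : HSW)
    std::printf("%s ", F.c_str()); // SNB list, then IVB delta, then HSW delta
}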
// SSE is not listed here since llvm treats AVX as a reimplementation of SSE,
// rather than a superset.
-class SandyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+def SNBFeatures : ProcessorFeatures<[], [
+ FeatureX87,
FeatureMMX,
FeatureAVX,
FeatureFXSR,
FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeatureSlowUAMem32,
FeaturePOPCNT,
FeatureAES,
FeaturePCLMUL,
@@ -376,198 +444,166 @@ class SandyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
FeatureXSAVEOPT,
FeatureLAHFSAHF
]>;
+
+class SandyBridgeProc<string Name> : ProcModel<Name, SandyBridgeModel,
+ SNBFeatures.Value, [
+ FeatureSlowBTMem,
+ FeatureSlowUAMem32
+]>;
def : SandyBridgeProc<"sandybridge">;
def : SandyBridgeProc<"corei7-avx">; // Legacy alias.
-class IvyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
- FeatureMMX,
- FeatureAVX,
- FeatureFXSR,
- FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeatureSlowUAMem32,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureXSAVE,
- FeatureXSAVEOPT,
+def IVBFeatures : ProcessorFeatures<SNBFeatures.Value, [
FeatureRDRAND,
FeatureF16C,
- FeatureFSGSBase,
- FeatureLAHFSAHF
+ FeatureFSGSBase
+]>;
+
+class IvyBridgeProc<string Name> : ProcModel<Name, SandyBridgeModel,
+ IVBFeatures.Value, [
+ FeatureSlowBTMem,
+ FeatureSlowUAMem32
]>;
def : IvyBridgeProc<"ivybridge">;
def : IvyBridgeProc<"core-avx-i">; // Legacy alias.
-class HaswellProc<string Name> : ProcessorModel<Name, HaswellModel, [
- FeatureMMX,
+def HSWFeatures : ProcessorFeatures<IVBFeatures.Value, [
FeatureAVX2,
- FeatureFXSR,
- FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureRDRAND,
- FeatureXSAVE,
- FeatureXSAVEOPT,
- FeatureF16C,
- FeatureFSGSBase,
- FeatureMOVBE,
- FeatureLZCNT,
FeatureBMI,
FeatureBMI2,
FeatureFMA,
+ FeatureLZCNT,
+ FeatureMOVBE,
+ FeatureINVPCID,
+ FeatureVMFUNC,
FeatureRTM,
FeatureHLE,
- FeatureSlowIncDec,
- FeatureLAHFSAHF
+ FeatureSlowIncDec
]>;
+
+class HaswellProc<string Name> : ProcModel<Name, HaswellModel,
+ HSWFeatures.Value, []>;
def : HaswellProc<"haswell">;
def : HaswellProc<"core-avx2">; // Legacy alias.
-class BroadwellProc<string Name> : ProcessorModel<Name, HaswellModel, [
- FeatureMMX,
- FeatureAVX2,
- FeatureFXSR,
- FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureXSAVE,
- FeatureXSAVEOPT,
- FeatureRDRAND,
- FeatureF16C,
- FeatureFSGSBase,
- FeatureMOVBE,
- FeatureLZCNT,
- FeatureBMI,
- FeatureBMI2,
- FeatureFMA,
- FeatureRTM,
- FeatureHLE,
+def BDWFeatures : ProcessorFeatures<HSWFeatures.Value, [
FeatureADX,
FeatureRDSEED,
- FeatureSlowIncDec,
- FeatureLAHFSAHF
+ FeatureSMAP
]>;
+class BroadwellProc<string Name> : ProcModel<Name, HaswellModel,
+ BDWFeatures.Value, []>;
def : BroadwellProc<"broadwell">;
+def SKLFeatures : ProcessorFeatures<BDWFeatures.Value, [
+ FeatureMPX,
+ FeatureXSAVEC,
+ FeatureXSAVES,
+ FeatureSGX,
+ FeatureCLFLUSHOPT
+]>;
+
+// FIXME: define SKL model
+class SkylakeClientProc<string Name> : ProcModel<Name, HaswellModel,
+ SKLFeatures.Value, []>;
+def : SkylakeClientProc<"skylake">;
+
// FIXME: define KNL model
-class KnightsLandingProc<string Name> : ProcessorModel<Name, HaswellModel, [
- FeatureMMX,
+class KnightsLandingProc<string Name> : ProcModel<Name, HaswellModel,
+ IVBFeatures.Value, [
FeatureAVX512,
- FeatureFXSR,
FeatureERI,
FeatureCDI,
FeaturePFI,
- FeatureCMPXCHG16B,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureXSAVE,
- FeatureXSAVEOPT,
- FeatureRDRAND,
- FeatureF16C,
- FeatureFSGSBase,
+ FeaturePREFETCHWT1,
+ FeatureADX,
+ FeatureRDSEED,
FeatureMOVBE,
FeatureLZCNT,
FeatureBMI,
FeatureBMI2,
- FeatureFMA,
- FeatureRTM,
- FeatureHLE,
- FeatureSlowIncDec,
- FeatureMPX,
- FeatureLAHFSAHF
+ FeatureFMA
]>;
def : KnightsLandingProc<"knl">;
-// FIXME: define SKX model
-class SkylakeProc<string Name> : ProcessorModel<Name, HaswellModel, [
- FeatureMMX,
+def SKXFeatures : ProcessorFeatures<SKLFeatures.Value, [
FeatureAVX512,
- FeatureFXSR,
FeatureCDI,
FeatureDQI,
FeatureBWI,
FeatureVLX,
FeaturePKU,
- FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureXSAVE,
- FeatureXSAVEOPT,
- FeatureRDRAND,
- FeatureF16C,
- FeatureFSGSBase,
- FeatureMOVBE,
- FeatureLZCNT,
- FeatureBMI,
- FeatureBMI2,
- FeatureFMA,
- FeatureRTM,
- FeatureHLE,
- FeatureADX,
- FeatureRDSEED,
- FeatureSlowIncDec,
- FeatureMPX,
- FeatureXSAVEC,
- FeatureXSAVES,
- FeatureLAHFSAHF
+ FeaturePCOMMIT,
+ FeatureCLWB
]>;
-def : SkylakeProc<"skylake">;
-def : SkylakeProc<"skx">; // Legacy alias.
+// FIXME: define SKX model
+class SkylakeServerProc<string Name> : ProcModel<Name, HaswellModel,
+ SKXFeatures.Value, []>;
+def : SkylakeServerProc<"skylake-avx512">;
+def : SkylakeServerProc<"skx">; // Legacy alias.
+
+def CNLFeatures : ProcessorFeatures<SKXFeatures.Value, [
+ FeatureVBMI,
+ FeatureIFMA,
+ FeatureSHA
+]>;
+
+class CannonlakeProc<string Name> : ProcModel<Name, HaswellModel,
+ CNLFeatures.Value, []>;
+def : CannonlakeProc<"cannonlake">;
// AMD CPUs.
-def : Proc<"k6", [FeatureSlowUAMem16, FeatureMMX]>;
-def : Proc<"k6-2", [FeatureSlowUAMem16, Feature3DNow]>;
-def : Proc<"k6-3", [FeatureSlowUAMem16, Feature3DNow]>;
-def : Proc<"athlon", [FeatureSlowUAMem16, Feature3DNowA,
+def : Proc<"k6", [FeatureX87, FeatureSlowUAMem16, FeatureMMX]>;
+def : Proc<"k6-2", [FeatureX87, FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"k6-3", [FeatureX87, FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"athlon", [FeatureX87, FeatureSlowUAMem16, Feature3DNowA,
FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"athlon-tbird", [FeatureSlowUAMem16, Feature3DNowA,
+def : Proc<"athlon-tbird", [FeatureX87, FeatureSlowUAMem16, Feature3DNowA,
FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"athlon-4", [FeatureSlowUAMem16, FeatureSSE1, Feature3DNowA,
- FeatureFXSR, FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"athlon-xp", [FeatureSlowUAMem16, FeatureSSE1, Feature3DNowA,
- FeatureFXSR, FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"athlon-mp", [FeatureSlowUAMem16, FeatureSSE1, Feature3DNowA,
- FeatureFXSR, FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"k8", [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
- FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
- FeatureSlowSHLD]>;
-def : Proc<"opteron", [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
- FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
- FeatureSlowSHLD]>;
-def : Proc<"athlon64", [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
- FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
+def : Proc<"athlon-4", [FeatureX87, FeatureSlowUAMem16, FeatureSSE1,
+ Feature3DNowA, FeatureFXSR, FeatureSlowBTMem,
FeatureSlowSHLD]>;
-def : Proc<"athlon-fx", [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
- FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
+def : Proc<"athlon-xp", [FeatureX87, FeatureSlowUAMem16, FeatureSSE1,
+ Feature3DNowA, FeatureFXSR, FeatureSlowBTMem,
FeatureSlowSHLD]>;
-def : Proc<"k8-sse3", [FeatureSlowUAMem16, FeatureSSE3, Feature3DNowA,
- FeatureFXSR, FeatureCMPXCHG16B, FeatureSlowBTMem,
+def : Proc<"athlon-mp", [FeatureX87, FeatureSlowUAMem16, FeatureSSE1,
+ Feature3DNowA, FeatureFXSR, FeatureSlowBTMem,
FeatureSlowSHLD]>;
-def : Proc<"opteron-sse3", [FeatureSlowUAMem16, FeatureSSE3, Feature3DNowA,
- FeatureFXSR, FeatureCMPXCHG16B, FeatureSlowBTMem,
- FeatureSlowSHLD]>;
-def : Proc<"athlon64-sse3", [FeatureSlowUAMem16, FeatureSSE3, Feature3DNowA,
- FeatureFXSR, FeatureCMPXCHG16B, FeatureSlowBTMem,
- FeatureSlowSHLD]>;
-def : Proc<"amdfam10", [FeatureSSE4A, Feature3DNowA, FeatureFXSR,
- FeatureCMPXCHG16B, FeatureLZCNT, FeaturePOPCNT,
- FeatureSlowBTMem, FeatureSlowSHLD, FeatureLAHFSAHF]>;
-def : Proc<"barcelona", [FeatureSSE4A, Feature3DNowA, FeatureFXSR,
- FeatureCMPXCHG16B, FeatureLZCNT, FeaturePOPCNT,
- FeatureSlowBTMem, FeatureSlowSHLD, FeatureLAHFSAHF]>;
+def : Proc<"k8", [FeatureX87, FeatureSlowUAMem16, FeatureSSE2,
+ Feature3DNowA, FeatureFXSR, Feature64Bit,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"opteron", [FeatureX87, FeatureSlowUAMem16, FeatureSSE2,
+ Feature3DNowA, FeatureFXSR, Feature64Bit,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon64", [FeatureX87, FeatureSlowUAMem16, FeatureSSE2,
+ Feature3DNowA, FeatureFXSR, Feature64Bit,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon-fx", [FeatureX87, FeatureSlowUAMem16, FeatureSSE2,
+ Feature3DNowA, FeatureFXSR, Feature64Bit,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"k8-sse3", [FeatureX87, FeatureSlowUAMem16, FeatureSSE3,
+ Feature3DNowA, FeatureFXSR, FeatureCMPXCHG16B,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"opteron-sse3", [FeatureX87, FeatureSlowUAMem16, FeatureSSE3,
+ Feature3DNowA, FeatureFXSR, FeatureCMPXCHG16B,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon64-sse3", [FeatureX87, FeatureSlowUAMem16, FeatureSSE3,
+ Feature3DNowA, FeatureFXSR, FeatureCMPXCHG16B,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"amdfam10", [FeatureX87, FeatureSSE4A, Feature3DNowA,
+ FeatureFXSR, FeatureCMPXCHG16B, FeatureLZCNT,
+ FeaturePOPCNT, FeatureSlowBTMem, FeatureSlowSHLD,
+ FeatureLAHFSAHF]>;
+def : Proc<"barcelona", [FeatureX87, FeatureSSE4A, Feature3DNowA,
+ FeatureFXSR, FeatureCMPXCHG16B, FeatureLZCNT,
+ FeaturePOPCNT, FeatureSlowBTMem, FeatureSlowSHLD,
+ FeatureLAHFSAHF]>;
// Bobcat
def : Proc<"btver1", [
+ FeatureX87,
FeatureMMX,
FeatureSSSE3,
FeatureSSE4A,
@@ -576,13 +612,13 @@ def : Proc<"btver1", [
FeaturePRFCHW,
FeatureLZCNT,
FeaturePOPCNT,
- FeatureXSAVE,
FeatureSlowSHLD,
FeatureLAHFSAHF
]>;
// Jaguar
def : ProcessorModel<"btver2", BtVer2Model, [
+ FeatureX87,
FeatureMMX,
FeatureAVX,
FeatureFXSR,
@@ -599,11 +635,13 @@ def : ProcessorModel<"btver2", BtVer2Model, [
FeatureXSAVE,
FeatureXSAVEOPT,
FeatureSlowSHLD,
- FeatureLAHFSAHF
+ FeatureLAHFSAHF,
+ FeatureFastPartialYMMWrite
]>;
// Bulldozer
def : Proc<"bdver1", [
+ FeatureX87,
FeatureXOP,
FeatureFMA4,
FeatureCMPXCHG16B,
@@ -622,6 +660,7 @@ def : Proc<"bdver1", [
]>;
// Piledriver
def : Proc<"bdver2", [
+ FeatureX87,
FeatureXOP,
FeatureFMA4,
FeatureCMPXCHG16B,
@@ -645,6 +684,7 @@ def : Proc<"bdver2", [
// Steamroller
def : Proc<"bdver3", [
+ FeatureX87,
FeatureXOP,
FeatureFMA4,
FeatureCMPXCHG16B,
@@ -670,6 +710,7 @@ def : Proc<"bdver3", [
// Excavator
def : Proc<"bdver4", [
+ FeatureX87,
FeatureMMX,
FeatureAVX2,
FeatureFXSR,
@@ -689,15 +730,17 @@ def : Proc<"bdver4", [
FeatureFMA,
FeatureXSAVEOPT,
FeatureFSGSBase,
- FeatureLAHFSAHF
+ FeatureLAHFSAHF,
+ FeatureMWAITX
]>;
-def : Proc<"geode", [FeatureSlowUAMem16, Feature3DNowA]>;
+def : Proc<"geode", [FeatureX87, FeatureSlowUAMem16, Feature3DNowA]>;
-def : Proc<"winchip-c6", [FeatureSlowUAMem16, FeatureMMX]>;
-def : Proc<"winchip2", [FeatureSlowUAMem16, Feature3DNow]>;
-def : Proc<"c3", [FeatureSlowUAMem16, Feature3DNow]>;
-def : Proc<"c3-2", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1, FeatureFXSR]>;
+def : Proc<"winchip-c6", [FeatureX87, FeatureSlowUAMem16, FeatureMMX]>;
+def : Proc<"winchip2", [FeatureX87, FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"c3", [FeatureX87, FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"c3-2", [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE1, FeatureFXSR]>;
// We also provide a generic 64-bit specific x86 processor model which tries to
// be good for modern chips without enabling instruction set encodings past the
@@ -710,8 +753,8 @@ def : Proc<"c3-2", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1, FeatureFXSR]>;
// knobs which need to be tuned differently for AMD chips, we might consider
// forming a common base for them.
def : ProcessorModel<"x86-64", SandyBridgeModel,
- [FeatureMMX, FeatureSSE2, FeatureFXSR, Feature64Bit,
- FeatureSlowBTMem ]>;
+ [FeatureX87, FeatureMMX, FeatureSSE2, FeatureFXSR,
+ Feature64Bit, FeatureSlowBTMem ]>;
//===----------------------------------------------------------------------===//
// Register File Description
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index 2170e62e30fd4..67e51f1e91945 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -17,7 +17,6 @@
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineValueType.h"
@@ -28,6 +27,7 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSectionCOFF.h"
@@ -50,6 +50,9 @@ bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
Subtarget = &MF.getSubtarget<X86Subtarget>();
SMShadowTracker.startFunction(MF);
+ CodeEmitter.reset(TM.getTarget().createMCCodeEmitter(
+ *MF.getSubtarget().getInstrInfo(), *MF.getSubtarget().getRegisterInfo(),
+ MF.getContext()));
SetupMachineFunction(MF);
@@ -66,6 +69,9 @@ bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Emit the rest of the function body.
EmitFunctionBody();
+ // Emit the XRay table for this function.
+ EmitXRayTable();
+
// We didn't modify anything.
return false;
}
@@ -85,11 +91,8 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO,
const GlobalValue *GV = MO.getGlobal();
MCSymbol *GVSym;
- if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB)
- GVSym = P.getSymbolWithGlobalValueBase(GV, "$stub");
- else if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
- MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
- MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
+ if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
+ MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE)
GVSym = P.getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
else
GVSym = P.getSymbol(GV);
@@ -107,21 +110,6 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO,
if (!StubSym.getPointer())
StubSym = MachineModuleInfoImpl::
StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage());
- } else if (MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE){
- MCSymbol *Sym = P.getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- MachineModuleInfoImpl::StubValueTy &StubSym =
- P.MMI->getObjFileInfo<MachineModuleInfoMachO>().getHiddenGVStubEntry(
- Sym);
- if (!StubSym.getPointer())
- StubSym = MachineModuleInfoImpl::
- StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage());
- } else if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
- MCSymbol *Sym = P.getSymbolWithGlobalValueBase(GV, "$stub");
- MachineModuleInfoImpl::StubValueTy &StubSym =
- P.MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (!StubSym.getPointer())
- StubSym = MachineModuleInfoImpl::
- StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage());
}
// If the name begins with a dollar-sign, enclose it in parens. We do this
@@ -145,7 +133,6 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO,
break;
case X86II::MO_DARWIN_NONLAZY:
case X86II::MO_DLLIMPORT:
- case X86II::MO_DARWIN_STUB:
// These affect the name of the symbol, not any suffix.
break;
case X86II::MO_GOT_ABSOLUTE_ADDRESS:
@@ -155,7 +142,6 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO,
break;
case X86II::MO_PIC_BASE_OFFSET:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
O << '-';
P.MF->getPICBaseSymbol()->print(O, P.MAI);
break;
@@ -294,7 +280,7 @@ static void printLeaMemReference(X86AsmPrinter &P, const MachineInstr *MI,
static void printMemReference(X86AsmPrinter &P, const MachineInstr *MI,
unsigned Op, raw_ostream &O,
const char *Modifier = nullptr) {
- assert(isMem(MI, Op) && "Invalid memory reference!");
+ assert(isMem(*MI, Op) && "Invalid memory reference!");
const MachineOperand &Segment = MI->getOperand(Op+X86::AddrSegmentReg);
if (Segment.getReg()) {
printOperand(P, MI, Op+X86::AddrSegmentReg, O, Modifier);
@@ -535,6 +521,12 @@ void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
}
}
OutStreamer->EmitSyntaxDirective();
+
+ // If this is not inline asm and we're in 16-bit
+ // mode, prefix assembly with .code16.
+ bool is16 = TT.getEnvironment() == Triple::CODE16;
+ if (M.getModuleInlineAsm().empty() && is16)
+ OutStreamer->EmitAssemblerFlag(MCAF_Code16);
}
static void
@@ -568,8 +560,9 @@ MCSymbol *X86AsmPrinter::GetCPISymbol(unsigned CPID) const {
const DataLayout &DL = MF->getDataLayout();
SectionKind Kind = CPE.getSectionKind(&DL);
const Constant *C = CPE.Val.ConstVal;
+ unsigned Align = CPE.Alignment;
if (const MCSectionCOFF *S = dyn_cast<MCSectionCOFF>(
- getObjFileLowering().getSectionForConstant(DL, Kind, C))) {
+ getObjFileLowering().getSectionForConstant(DL, Kind, C, Align))) {
if (MCSymbol *Sym = S->getCOMDATSymbol()) {
if (Sym->isUndefined())
OutStreamer->EmitSymbolAttribute(Sym, MCSA_Global);
@@ -593,30 +586,6 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
// Output stubs for dynamically-linked functions.
MachineModuleInfoMachO::SymbolListTy Stubs;
- Stubs = MMIMacho.GetFnStubList();
- if (!Stubs.empty()) {
- MCSection *TheSection = OutContext.getMachOSection(
- "__IMPORT", "__jump_table",
- MachO::S_SYMBOL_STUBS | MachO::S_ATTR_SELF_MODIFYING_CODE |
- MachO::S_ATTR_PURE_INSTRUCTIONS,
- 5, SectionKind::getMetadata());
- OutStreamer->SwitchSection(TheSection);
-
- for (const auto &Stub : Stubs) {
- // L_foo$stub:
- OutStreamer->EmitLabel(Stub.first);
- // .indirect_symbol _foo
- OutStreamer->EmitSymbolAttribute(Stub.second.getPointer(),
- MCSA_IndirectSymbol);
- // hlt; hlt; hlt; hlt; hlt hlt = 0xf4.
- const char HltInsts[] = "\xf4\xf4\xf4\xf4\xf4";
- OutStreamer->EmitBytes(StringRef(HltInsts, 5));
- }
-
- Stubs.clear();
- OutStreamer->AddBlankLine();
- }
-
// Output stubs for external and common global variables.
Stubs = MMIMacho.GetGVStubList();
if (!Stubs.empty()) {
@@ -632,20 +601,6 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer->AddBlankLine();
}
- Stubs = MMIMacho.GetHiddenGVStubList();
- if (!Stubs.empty()) {
- MCSection *TheSection = OutContext.getMachOSection(
- "__IMPORT", "__pointers", MachO::S_NON_LAZY_SYMBOL_POINTERS,
- SectionKind::getMetadata());
- OutStreamer->SwitchSection(TheSection);
-
- for (auto &Stub : Stubs)
- emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second);
-
- Stubs.clear();
- OutStreamer->AddBlankLine();
- }
-
SM.serializeToStackMapSection();
FM.serializeToFaultMapSection();
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index 9c8bd98dbade3..dcb7b5a3466fb 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -29,6 +29,7 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
StackMaps SM;
FaultMaps FM;
+ std::unique_ptr<MCCodeEmitter> CodeEmitter;
// This utility class tracks the length of a stackmap instruction's 'shadow'.
// It is used by the X86AsmPrinter to ensure that the stackmap shadow
@@ -40,10 +41,11 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
// few instruction bytes to cover the shadow are NOPs used for padding.
class StackMapShadowTracker {
public:
- StackMapShadowTracker(TargetMachine &TM);
- ~StackMapShadowTracker();
- void startFunction(MachineFunction &MF);
- void count(MCInst &Inst, const MCSubtargetInfo &STI);
+ void startFunction(MachineFunction &MF) {
+ this->MF = &MF;
+ }
+ void count(MCInst &Inst, const MCSubtargetInfo &STI,
+ MCCodeEmitter *CodeEmitter);
// Called to signal the start of a shadow of RequiredSize bytes.
void reset(unsigned RequiredSize) {
@@ -56,21 +58,40 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
// to emit any necessary padding-NOPs.
void emitShadowPadding(MCStreamer &OutStreamer, const MCSubtargetInfo &STI);
private:
- TargetMachine &TM;
const MachineFunction *MF;
- std::unique_ptr<MCCodeEmitter> CodeEmitter;
- bool InShadow;
+ bool InShadow = false;
// RequiredShadowSize holds the length of the shadow specified in the most
// recently encountered STACKMAP instruction.
// CurrentShadowSize counts the number of bytes encoded since the most
// recently encountered STACKMAP, stopping when that number is greater than
// or equal to RequiredShadowSize.
- unsigned RequiredShadowSize, CurrentShadowSize;
+ unsigned RequiredShadowSize = 0, CurrentShadowSize = 0;
};
StackMapShadowTracker SMShadowTracker;
+ // This describes the kind of sled we're storing in the XRay table.
+ enum class SledKind : uint8_t {
+ FUNCTION_ENTER = 0,
+ FUNCTION_EXIT = 1,
+ TAIL_CALL = 2,
+ };
+
+ // The table will contain these structs that point to the sled, the function
+ // containing the sled, and what kind of sled (and whether they should always
+ // be instrumented).
+ struct XRayFunctionEntry {
+ const MCSymbol *Sled;
+ const MCSymbol *Function;
+ SledKind Kind;
+ bool AlwaysInstrument;
+ const class Function *Fn;
+ };
+
+ // All the sleds to be emitted.
+ std::vector<XRayFunctionEntry> Sleds;
+
// All instructions emitted by the X86AsmPrinter should use this helper
// method.
//
@@ -82,14 +103,26 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
void LowerPATCHPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerSTATEPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerFAULTING_LOAD_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerPATCHABLE_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerTlsAddr(X86MCInstLower &MCInstLowering, const MachineInstr &MI);
- public:
- explicit X86AsmPrinter(TargetMachine &TM,
- std::unique_ptr<MCStreamer> Streamer)
- : AsmPrinter(TM, std::move(Streamer)), SM(*this), FM(*this),
- SMShadowTracker(TM) {}
+ // XRay-specific lowering for X86.
+ void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
+ X86MCInstLower &MCIL);
+ void LowerPATCHABLE_RET(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
+
+ // Helper function that emits the XRay sleds we've collected for a particular
+ // function.
+ void EmitXRayTable();
+
+ // Helper function to record a given XRay sled.
+ void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind);
+public:
+ explicit X86AsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), SM(*this), FM(*this) {}
const char *getPassName() const override {
return "X86 Assembly / Object Emitter";
diff --git a/lib/Target/X86/X86CallFrameOptimization.cpp b/lib/Target/X86/X86CallFrameOptimization.cpp
index fc6ee1752f1f1..b16fa76c73fae 100644
--- a/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -10,9 +10,9 @@
// This file defines a pass that optimizes call sequences on x86.
// Currently, it converts movs of function parameters onto the stack into
// pushes. This is beneficial for two main reasons:
-// 1) The push instruction encoding is much smaller than an esp-relative mov
+// 1) The push instruction encoding is much smaller than a stack-ptr-based mov.
// 2) It is possible to push memory arguments directly. So, if the
-// the transformation is preformed pre-reg-alloc, it can help relieve
+// transformation is performed pre-reg-alloc, it can help relieve
// register pressure.
//
//===----------------------------------------------------------------------===//
@@ -21,8 +21,8 @@
#include "X86.h"
#include "X86InstrInfo.h"
-#include "X86Subtarget.h"
#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -55,7 +55,7 @@ private:
struct CallContext {
CallContext()
: FrameSetup(nullptr), Call(nullptr), SPCopy(nullptr), ExpectedDist(0),
- MovVector(4, nullptr), NoStackParams(false), UsePush(false){}
+ MovVector(4, nullptr), NoStackParams(false), UsePush(false) {}
// Iterator referring to the frame setup instruction
MachineBasicBlock::iterator FrameSetup;
@@ -75,7 +75,7 @@ private:
// True if this call site has no stack parameters
bool NoStackParams;
- // True of this callsite can use push instructions
+ // True if this call site can use push instructions
bool UsePush;
};
@@ -88,7 +88,7 @@ private:
void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, CallContext &Context);
- bool adjustCallSequence(MachineFunction &MF, const CallContext &Context);
+ void adjustCallSequence(MachineFunction &MF, const CallContext &Context);
MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
unsigned Reg);
@@ -105,12 +105,14 @@ private:
const TargetInstrInfo *TII;
const X86FrameLowering *TFL;
const X86Subtarget *STI;
- const MachineRegisterInfo *MRI;
+ MachineRegisterInfo *MRI;
+ unsigned SlotSize;
+ unsigned Log2SlotSize;
static char ID;
};
char X86CallFrameOptimization::ID = 0;
-}
+} // end anonymous namespace
FunctionPass *llvm::createX86CallFrameOptimization() {
return new X86CallFrameOptimization();
@@ -123,22 +125,19 @@ bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
if (NoX86CFOpt.getValue())
return false;
- // We currently only support call sequences where *all* parameters.
- // are passed on the stack.
- // No point in running this in 64-bit mode, since some arguments are
- // passed in-register in all common calling conventions, so the pattern
- // we're looking for will never match.
- if (STI->is64Bit())
- return false;
-
// We can't encode multiple DW_CFA_GNU_args_size or DW_CFA_def_cfa_offset
// in the compact unwind encoding that Darwin uses. So, bail if there
// is a danger of that being generated.
- if (STI->isTargetDarwin() &&
- (!MF.getMMI().getLandingPads().empty() ||
+ if (STI->isTargetDarwin() &&
+ (!MF.getMMI().getLandingPads().empty() ||
(MF.getFunction()->needsUnwindTableEntry() && !TFL->hasFP(MF))))
return false;
+ // It is not valid to change the stack pointer outside the prolog/epilog
+ // on 64-bit Windows.
+ if (STI->isTargetWin64())
+ return false;
+
// You would expect straight-line code between call-frame setup and
// call-frame destroy. You would be wrong. There are circumstances (e.g.
// CMOV_GR8 expansion of a select that feeds a function call!) where we can
@@ -169,10 +168,10 @@ bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
return true;
}
-// Check whether this trasnformation is profitable for a particular
+// Check whether this transformation is profitable for a particular
// function - in terms of code size.
-bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
- ContextVector &CallSeqVector) {
+bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
+ ContextVector &CallSeqVector) {
// This transformation is always a win when we do not expect to have
// a reserved call frame. Under other circumstances, it may be either
// a win or a loss, and requires a heuristic.
@@ -180,10 +179,6 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
if (CannotReserveFrame)
return true;
- // Don't do this when not optimizing for size.
- if (!MF.getFunction()->optForSize())
- return false;
-
unsigned StackAlign = TFL->getStackAlignment();
int64_t Advantage = 0;
@@ -206,16 +201,16 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
// We can use pushes. First, account for the fixed costs.
// We'll need an add after the call.
Advantage -= 3;
- // If we have to realign the stack, we'll also need and sub before
+ // If we have to realign the stack, we'll also need a sub before the call.
if (CC.ExpectedDist % StackAlign)
Advantage -= 3;
// Now, for each push, we save ~3 bytes. For small constants, we actually
// save more (up to 5 bytes), but 3 should be a good approximation.
- Advantage += (CC.ExpectedDist / 4) * 3;
+ Advantage += (CC.ExpectedDist >> Log2SlotSize) * 3;
}
}
- return (Advantage >= 0);
+ return Advantage >= 0;
}
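Plugging numbers into the heuristic: a 32-bit call site passing 16 bytes of arguments with no realignment pays 3 bytes for the post-call add and recovers about 3 bytes per push. A sketch of that arithmetic:

#include <cstdio>

int main() {
  // SlotSize = 4 on 32-bit x86, so Log2SlotSize = 2 and 16 bytes of
  // arguments means four pushes.
  int ExpectedDist = 16, Log2SlotSize = 2;
  int Advantage = -3 + (ExpectedDist >> Log2SlotSize) * 3;
  std::printf("%d\n", Advantage); // 9 >= 0: the transformation pays off
}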
bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
@@ -224,6 +219,12 @@ bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
TFL = STI->getFrameLowering();
MRI = &MF.getRegInfo();
+ const X86RegisterInfo &RegInfo =
+ *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
+ SlotSize = RegInfo.getSlotSize();
+ assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
+ Log2SlotSize = Log2_32(SlotSize);
+
if (!isLegal(MF))
return false;
@@ -233,20 +234,23 @@ bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
ContextVector CallSeqVector;
- for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
- for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
- if (I->getOpcode() == FrameSetupOpcode) {
+ for (auto &MBB : MF)
+ for (auto &MI : MBB)
+ if (MI.getOpcode() == FrameSetupOpcode) {
CallContext Context;
- collectCallInfo(MF, *BB, I, Context);
+ collectCallInfo(MF, MBB, MI, Context);
CallSeqVector.push_back(Context);
}
if (!isProfitable(MF, CallSeqVector))
return false;
- for (auto CC : CallSeqVector)
- if (CC.UsePush)
- Changed |= adjustCallSequence(MF, CC);
+ for (auto CC : CallSeqVector) {
+ if (CC.UsePush) {
+ adjustCallSequence(MF, CC);
+ Changed = true;
+ }
+ }
return Changed;
}
@@ -260,7 +264,8 @@ X86CallFrameOptimization::classifyInstruction(
// The instructions we actually care about are movs onto the stack
int Opcode = MI->getOpcode();
- if (Opcode == X86::MOV32mi || Opcode == X86::MOV32mr)
+ if (Opcode == X86::MOV32mi || Opcode == X86::MOV32mr ||
+ Opcode == X86::MOV64mi32 || Opcode == X86::MOV64mr)
return Convert;
// Not all calling conventions have only stack MOVs between the stack
@@ -315,8 +320,8 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
CallContext &Context) {
// Check that this particular call sequence is amenable to the
// transformation.
- const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
- STI->getRegisterInfo());
+ const X86RegisterInfo &RegInfo =
+ *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
// We expect to enter this at the beginning of a call sequence
@@ -326,7 +331,8 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
// How much do we adjust the stack? This puts an upper bound on
// the number of parameters actually passed on it.
- unsigned int MaxAdjust = FrameSetup->getOperand(0).getImm() / 4;
+ unsigned int MaxAdjust =
+ FrameSetup->getOperand(0).getImm() >> Log2SlotSize;
// A zero adjustment means no stack parameters
if (!MaxAdjust) {
@@ -340,19 +346,19 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
while (I->getOpcode() == X86::LEA32r)
++I;
- // We expect a copy instruction here.
- // TODO: The copy instruction is a lowering artifact.
- // We should also support a copy-less version, where the stack
- // pointer is used directly.
- if (!I->isCopy() || !I->getOperand(0).isReg())
- return;
- Context.SPCopy = I++;
-
- unsigned StackPtr = Context.SPCopy->getOperand(0).getReg();
+ unsigned StackPtr = RegInfo.getStackRegister();
+ // SelectionDAG (but not FastISel) inserts a copy of ESP into a virtual
+ // register here. If it's there, use that virtual register as stack pointer
+ // instead.
+ if (I->isCopy() && I->getOperand(0).isReg() && I->getOperand(1).isReg() &&
+ I->getOperand(1).getReg() == StackPtr) {
+ Context.SPCopy = &*I++;
+ StackPtr = Context.SPCopy->getOperand(0).getReg();
+ }
// Scan the call setup sequence for the pattern we're looking for.
- // We only handle a simple case - a sequence of MOV32mi or MOV32mr
- // instructions, that push a sequence of 32-bit values onto the stack, with
+ // We only handle a simple case - a sequence of store instructions that
+ // push a sequence of stack-slot-aligned values onto the stack, with
// no gaps between them.
if (MaxAdjust > 4)
Context.MovVector.resize(MaxAdjust, nullptr);
@@ -367,9 +373,9 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
continue;
}
- // We know the instruction is a MOV32mi/MOV32mr.
+ // We know the instruction has a supported store opcode.
// We only want movs of the form:
- // movl imm/r32, k(%esp)
+ // mov imm/reg, k(%StackPtr)
// If we run into something else, bail.
// Note that AddrBaseReg may, counter to its name, not be a register,
// but rather a frame index.
@@ -390,9 +396,9 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
"Negative stack displacement when passing parameters");
// We really don't want to consider the unaligned case.
- if (StackDisp % 4)
+ if (StackDisp & (SlotSize - 1))
return;
- StackDisp /= 4;
+ StackDisp >>= Log2SlotSize;
assert((size_t)StackDisp < Context.MovVector.size() &&
"Function call has more parameters than the stack is adjusted for.");
@@ -400,7 +406,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
// If the same stack slot is being filled twice, something's fishy.
if (Context.MovVector[StackDisp] != nullptr)
return;
- Context.MovVector[StackDisp] = I;
+ Context.MovVector[StackDisp] = &*I;
for (const MachineOperand &MO : I->uses()) {
if (!MO.isReg())
@@ -418,14 +424,14 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
if (I == MBB.end() || !I->isCall())
return;
- Context.Call = I;
+ Context.Call = &*I;
if ((++I)->getOpcode() != FrameDestroyOpcode)
return;
// Now, go through the vector, and see that we don't have any gaps,
- // but only a series of 32-bit MOVs.
+ // but only a series of MOVs.
auto MMI = Context.MovVector.begin(), MME = Context.MovVector.end();
- for (; MMI != MME; ++MMI, Context.ExpectedDist += 4)
+ for (; MMI != MME; ++MMI, Context.ExpectedDist += SlotSize)
if (*MMI == nullptr)
break;
@@ -440,10 +446,9 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
return;
Context.UsePush = true;
- return;
}
-bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
+void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
const CallContext &Context) {
// Ok, we can in fact do the transformation for this call.
// Do not remove the FrameSetup instruction, but adjust the parameters.
@@ -453,15 +458,21 @@ bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
FrameSetup->getOperand(1).setImm(Context.ExpectedDist);
DebugLoc DL = FrameSetup->getDebugLoc();
+ bool Is64Bit = STI->is64Bit();
// Now, iterate through the vector in reverse order, and replace the movs
// with pushes. MOVmi/MOVmr doesn't have any defs, so no need to
// replace uses.
- for (int Idx = (Context.ExpectedDist / 4) - 1; Idx >= 0; --Idx) {
+ for (int Idx = (Context.ExpectedDist >> Log2SlotSize) - 1; Idx >= 0; --Idx) {
MachineBasicBlock::iterator MOV = *Context.MovVector[Idx];
MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);
MachineBasicBlock::iterator Push = nullptr;
- if (MOV->getOpcode() == X86::MOV32mi) {
- unsigned PushOpcode = X86::PUSHi32;
+ unsigned PushOpcode;
+ switch (MOV->getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected Opcode!");
+ case X86::MOV32mi:
+ case X86::MOV64mi32:
+ PushOpcode = Is64Bit ? X86::PUSH64i32 : X86::PUSHi32;
// If the operand is a small (8-bit) immediate, we can use a
// PUSH instruction with a shorter encoding.
// Note that isImm() may fail even though this is a MOVmi, because
@@ -469,13 +480,27 @@ bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
if (PushOp.isImm()) {
int64_t Val = PushOp.getImm();
if (isInt<8>(Val))
- PushOpcode = X86::PUSH32i8;
+ PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
}
Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
- .addOperand(PushOp);
- } else {
+ .addOperand(PushOp);
+ break;
+ case X86::MOV32mr:
+ case X86::MOV64mr:
unsigned int Reg = PushOp.getReg();
+ // If storing a 32-bit vreg on 64-bit targets, extend to a 64-bit vreg
+ // in preparation for the PUSH64. The upper 32 bits can be undef.
+ if (Is64Bit && MOV->getOpcode() == X86::MOV32mr) {
+ unsigned UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass);
+ Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
+ BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
+ BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
+ .addReg(UndefReg)
+ .addOperand(PushOp)
+ .addImm(X86::sub_32bit);
+ }
+
// If PUSHrmm is not slow on this target, try to fold the source of the
// push into the instruction.
bool SlowPUSHrmm = STI->isAtom() || STI->isSLM();
@@ -484,7 +509,8 @@ bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
// conservative about that.
MachineInstr *DefMov = nullptr;
if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
- Push = BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32rmm));
+ PushOpcode = Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm;
+ Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode));
unsigned NumOps = DefMov->getDesc().getNumOperands();
for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
@@ -492,33 +518,34 @@ bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
DefMov->eraseFromParent();
} else {
- Push = BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32r))
- .addReg(Reg)
- .getInstr();
+ PushOpcode = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
+ Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
+ .addReg(Reg)
+ .getInstr();
}
+ break;
}
// For debugging, when using SP-based CFA, we need to adjust the CFA
// offset after each push.
// TODO: This is needed only if we require precise CFA.
if (!TFL->hasFP(MF))
- TFL->BuildCFI(MBB, std::next(Push), DL,
- MCCFIInstruction::createAdjustCfaOffset(nullptr, 4));
+ TFL->BuildCFI(
+ MBB, std::next(Push), DL,
+ MCCFIInstruction::createAdjustCfaOffset(nullptr, SlotSize));
MBB.erase(MOV);
}
// The stack-pointer copy is no longer used in the call sequences.
// There should not be any other users, but we can't commit to that, so:
- if (MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
+ if (Context.SPCopy && MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
Context.SPCopy->eraseFromParent();
// Once we've done this, we need to make sure PEI doesn't assume a reserved
// frame.
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
FuncInfo->setHasPushSequences(true);
-
- return true;
}
MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
@@ -540,19 +567,20 @@ MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
if (!MRI->hasOneNonDBGUse(Reg))
return nullptr;
- MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg);
+ MachineInstr &DefMI = *MRI->getVRegDef(Reg);
// Make sure the def is a MOV from memory.
- // If the def is an another block, give up.
- if (DefMI->getOpcode() != X86::MOV32rm ||
- DefMI->getParent() != FrameSetup->getParent())
+ // If the def is in another block, give up.
+ if ((DefMI.getOpcode() != X86::MOV32rm &&
+ DefMI.getOpcode() != X86::MOV64rm) ||
+ DefMI.getParent() != FrameSetup->getParent())
return nullptr;
// Make sure we don't have any instructions between DefMI and the
// push that make folding the load illegal.
- for (auto I = DefMI; I != FrameSetup; ++I)
+ for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)
if (I->isLoadFoldBarrier())
return nullptr;
- return DefMI;
+ return &DefMI;
}
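
As a hedged summary of the opcode handling in adjustCallSequence above (the memory-folded PUSHrmm path is left out), the mov-to-push mapping reduces to the following sketch; the opcode names come from the X86 target headers, the helper itself is illustrative:

    // Illustrative only; mirrors the switch in adjustCallSequence.
    static unsigned movToPushOpcode(unsigned MovOpc, bool Is64Bit, bool Imm8) {
      switch (MovOpc) {
      case X86::MOV32mi:
      case X86::MOV64mi32:
        if (Imm8)                          // 8-bit immediates encode shorter
          return Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
        return Is64Bit ? X86::PUSH64i32 : X86::PUSHi32;
      case X86::MOV32mr:
      case X86::MOV64mr:                   // 32-bit regs are widened to 64 first
        return Is64Bit ? X86::PUSH64r : X86::PUSH32r;
      default:
        llvm_unreachable("Unexpected Opcode!");
      }
    }
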
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index ed2e880671688..4cb62b56bce4d 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -162,6 +162,9 @@ def RetCC_X86_64_C : CallingConv<[
// MMX vector types are always returned in XMM0.
CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
+
+ CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
+
CCDelegateTo<RetCC_X86Common>
]>;
@@ -192,6 +195,24 @@ def RetCC_X86_64_WebKit_JS : CallingConv<[
CCIfType<[i64], CCAssignToReg<[RAX]>>
]>;
+def RetCC_X86_64_Swift : CallingConv<[
+ // For integers, ECX, R8D can be used as extra return registers.
+ CCIfType<[i1], CCPromoteToType<i8>>,
+ CCIfType<[i8] , CCAssignToReg<[AL, DL, CL, R8B]>>,
+ CCIfType<[i16], CCAssignToReg<[AX, DX, CX, R8W]>>,
+ CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX, R8D]>>,
+ CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX, R8]>>,
+
+ // XMM0, XMM1, XMM2 and XMM3 can be used to return FP values.
+ CCIfType<[f32], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+ CCIfType<[f64], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+ CCIfType<[f128], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+
+ // MMX vector types are returned in XMM0, XMM1, XMM2 and XMM3.
+ CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+ CCDelegateTo<RetCC_X86Common>
+]>;
+
// X86-64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
@@ -234,6 +255,9 @@ def RetCC_X86_64 : CallingConv<[
CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
+ // Handle Swift calls.
+ CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
+
// Handle explicit CC selection
CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
@@ -273,6 +297,16 @@ def CC_X86_64_C : CallingConv<[
CCIfNest<CCIfSubtarget<"isTarget64BitILP32()", CCAssignToReg<[R10D]>>>,
CCIfNest<CCAssignToReg<[R10]>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,
+
+ // A SwiftError is passed in R12.
+ CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
+
+ // For Swift Calling Convention, pass sret in %RAX.
+ CCIfCC<"CallingConv::Swift",
+ CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,
+
// The first 6 integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,
@@ -770,6 +804,9 @@ def CC_X86_64_Intr : CallingConv<[
// This is the root argument convention for the X86-32 backend.
def CC_X86_32 : CallingConv<[
+  // The X86_INTR calling convention is also valid on MCU targets and should
+  // override the MCU calling convention, so it must be checked before
+  // isTargetMCU().
+ CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_32_Intr>>,
CCIfSubtarget<"isTargetMCU()", CCDelegateTo<CC_X86_32_MCU>>,
CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_32_VectorCall>>,
@@ -777,7 +814,6 @@ def CC_X86_32 : CallingConv<[
CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,
- CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_32_Intr>>,
// Otherwise, drop to normal X86-32 CC
CCDelegateTo<CC_X86_32_C>
@@ -819,6 +855,8 @@ def CSR_NoRegs : CalleeSavedRegs<(add)>;
def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;
+def CSR_64_SwiftError : CalleeSavedRegs<(sub CSR_64, R12)>;
+
def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;
@@ -852,15 +890,23 @@ def CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
(sequence "XMM%u", 0, 15))>;
def CSR_32_AllRegs : CalleeSavedRegs<(add EAX, EBX, ECX, EDX, EBP, ESI,
- EDI, ESP)>;
+ EDI)>;
def CSR_32_AllRegs_SSE : CalleeSavedRegs<(add CSR_32_AllRegs,
(sequence "XMM%u", 0, 7))>;
-
-def CSR_64_AllRegs : CalleeSavedRegs<(add CSR_64_MostRegs, RAX, RSP,
- (sequence "XMM%u", 16, 31))>;
-def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX, RSP,
- (sequence "YMM%u", 0, 31)),
+def CSR_32_AllRegs_AVX : CalleeSavedRegs<(add CSR_32_AllRegs,
+ (sequence "YMM%u", 0, 7))>;
+def CSR_32_AllRegs_AVX512 : CalleeSavedRegs<(add CSR_32_AllRegs,
+ (sequence "ZMM%u", 0, 7),
+ (sequence "K%u", 0, 7))>;
+
+def CSR_64_AllRegs : CalleeSavedRegs<(add CSR_64_MostRegs, RAX)>;
+def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
+ (sequence "YMM%u", 0, 15)),
(sequence "XMM%u", 0, 15))>;
+def CSR_64_AllRegs_AVX512 : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
+ (sequence "ZMM%u", 0, 31),
+ (sequence "K%u", 0, 7)),
+ (sequence "XMM%u", 0, 15))>;
// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
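
A note on the CC_X86_32 hunk above: TableGen tries calling-convention entries in order and the first match wins, which is why the X86_INTR test must now precede the MCU subtarget test. A hypothetical simplification of the generated dispatch, not the real generated code:

    const char *selectCC_X86_32(bool IsX86INTR, bool IsTargetMCU) {
      if (IsX86INTR)
        return "CC_X86_32_Intr"; // must come before the MCU check
      if (IsTargetMCU)
        return "CC_X86_32_MCU";
      return "CC_X86_32_C";      // remaining conventions elided
    }
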
diff --git a/lib/Target/X86/X86ExpandPseudo.cpp b/lib/Target/X86/X86ExpandPseudo.cpp
index a09d065193768..093fed7276f7f 100644
--- a/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/lib/Target/X86/X86ExpandPseudo.cpp
@@ -44,10 +44,16 @@ public:
const X86Subtarget *STI;
const X86InstrInfo *TII;
const X86RegisterInfo *TRI;
+ const X86MachineFunctionInfo *X86FI;
const X86FrameLowering *X86FL;
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "X86 pseudo instruction expansion pass";
}
@@ -83,11 +89,18 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
// Adjust stack pointer.
int StackAdj = StackAdjust.getImm();
+ int MaxTCDelta = X86FI->getTCReturnAddrDelta();
+ int Offset = 0;
+ assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
+
+    // Incorporate the retaddr area.
+    Offset = StackAdj - MaxTCDelta;
+ assert(Offset >= 0 && "Offset should never be negative");
- if (StackAdj) {
+ if (Offset) {
// Check for possible merge with preceding ADD instruction.
- StackAdj += X86FL->mergeSPUpdates(MBB, MBBI, true);
- X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
+ Offset += X86FL->mergeSPUpdates(MBB, MBBI, true);
+ X86FL->emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
}
// Jump to label or value in register.
@@ -121,8 +134,8 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
.addReg(JumpTarget.getReg(), RegState::Kill);
}
- MachineInstr *NewMI = std::prev(MBBI);
- NewMI->copyImplicitOps(*MBBI->getParent()->getParent(), MBBI);
+ MachineInstr &NewMI = *std::prev(MBBI);
+ NewMI.copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);
// Delete the pseudo instruction TCRETURN.
MBB.erase(MBBI);
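
A worked example of the offset computation above, with made-up numbers:

    #include <cassert>

    int epilogueOffset() {
      int StackAdj = 16;                  // bytes the tail call must pop
      int MaxTCDelta = -8;                // retaddr-area delta; never positive
      int Offset = StackAdj - MaxTCDelta; // 16 - (-8) = 24 bytes of adjustment
      assert(Offset >= 0 && "Offset should never be negative");
      return Offset;
    }
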
@@ -152,6 +165,32 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MBB.erase(MBBI);
return true;
}
+ case X86::RET: {
+ // Adjust stack to erase error code
+ int64_t StackAdj = MBBI->getOperand(0).getImm();
+ MachineInstrBuilder MIB;
+ if (StackAdj == 0) {
+ MIB = BuildMI(MBB, MBBI, DL,
+ TII->get(STI->is64Bit() ? X86::RETQ : X86::RETL));
+ } else if (isUInt<16>(StackAdj)) {
+ MIB = BuildMI(MBB, MBBI, DL,
+ TII->get(STI->is64Bit() ? X86::RETIQ : X86::RETIL))
+ .addImm(StackAdj);
+ } else {
+ assert(!STI->is64Bit() &&
+ "shouldn't need to do this for x86_64 targets!");
+ // A ret can only handle immediates as big as 2**16-1. If we need to pop
+ // off bytes before the return address, we must do it manually.
+      BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r))
+          .addReg(X86::ECX, RegState::Define);
+ X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
+ BuildMI(MBB, MBBI, DL, TII->get(X86::PUSH32r)).addReg(X86::ECX);
+ MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RETL));
+ }
+ for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I)
+ MIB.addOperand(MBBI->getOperand(I));
+ MBB.erase(MBBI);
+ return true;
+ }
case X86::EH_RESTORE: {
// Restore ESP and EBP, and optionally ESI if required.
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
@@ -160,6 +199,38 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MBBI->eraseFromParent();
return true;
}
+ case X86::LCMPXCHG8B_SAVE_EBX:
+ case X86::LCMPXCHG16B_SAVE_RBX: {
+ // Perform the following transformation.
+ // SaveRbx = pseudocmpxchg Addr, <4 opds for the address>, InArg, SaveRbx
+ // =>
+ // [E|R]BX = InArg
+ // actualcmpxchg Addr
+ // [E|R]BX = SaveRbx
+ const MachineOperand &InArg = MBBI->getOperand(6);
+ unsigned SaveRbx = MBBI->getOperand(7).getReg();
+
+ unsigned ActualInArg =
+ Opcode == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
+ // Copy the input argument of the pseudo into the argument of the
+ // actual instruction.
+ TII->copyPhysReg(MBB, MBBI, DL, ActualInArg, InArg.getReg(),
+ InArg.isKill());
+ // Create the actual instruction.
+ unsigned ActualOpc =
+ Opcode == X86::LCMPXCHG8B_SAVE_EBX ? X86::LCMPXCHG8B : X86::LCMPXCHG16B;
+ MachineInstr *NewInstr = BuildMI(MBB, MBBI, DL, TII->get(ActualOpc));
+ // Copy the operands related to the address.
+ for (unsigned Idx = 1; Idx < 6; ++Idx)
+ NewInstr->addOperand(MBBI->getOperand(Idx));
+    // Finally, restore the previous value of [E|R]BX.
+ TII->copyPhysReg(MBB, MBBI, DL, ActualInArg, SaveRbx,
+ /*SrcIsKill*/ true);
+
+ // Delete the pseudo.
+ MBBI->eraseFromParent();
+ return true;
+ }
}
llvm_unreachable("Previous switch has a fallthrough?");
}
@@ -184,6 +255,7 @@ bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
STI = &static_cast<const X86Subtarget &>(MF.getSubtarget());
TII = STI->getInstrInfo();
TRI = STI->getRegisterInfo();
+ X86FI = MF.getInfo<X86MachineFunctionInfo>();
X86FL = STI->getFrameLowering();
bool Modified = false;
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index f48b47934e038..dfe3c80be21d1 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -22,7 +22,6 @@
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
-#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -30,6 +29,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
@@ -82,7 +82,8 @@ public:
#include "X86GenFastISel.inc"
private:
- bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT, DebugLoc DL);
+ bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
+ const DebugLoc &DL);
bool X86FastEmitLoad(EVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
unsigned &ResultReg, unsigned Alignment = 1);
@@ -347,6 +348,11 @@ bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM,
MachineMemOperand *MMO, unsigned &ResultReg,
unsigned Alignment) {
+ bool HasSSE41 = Subtarget->hasSSE41();
+ bool HasAVX = Subtarget->hasAVX();
+ bool HasAVX2 = Subtarget->hasAVX2();
+ bool IsNonTemporal = MMO && MMO->isNonTemporal();
+
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = nullptr;
@@ -372,7 +378,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM,
break;
case MVT::f32:
if (X86ScalarSSEf32) {
- Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
+ Opc = HasAVX ? X86::VMOVSSrm : X86::MOVSSrm;
RC = &X86::FR32RegClass;
} else {
Opc = X86::LD_Fp32m;
@@ -381,7 +387,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM,
break;
case MVT::f64:
if (X86ScalarSSEf64) {
- Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
+ Opc = HasAVX ? X86::VMOVSDrm : X86::MOVSDrm;
RC = &X86::FR64RegClass;
} else {
Opc = X86::LD_Fp64m;
@@ -392,29 +398,91 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM,
// No f80 support yet.
return false;
case MVT::v4f32:
- if (Alignment >= 16)
- Opc = Subtarget->hasAVX() ? X86::VMOVAPSrm : X86::MOVAPSrm;
+ if (IsNonTemporal && Alignment >= 16 && HasSSE41)
+ Opc = HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
+ else if (Alignment >= 16)
+ Opc = HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
else
- Opc = Subtarget->hasAVX() ? X86::VMOVUPSrm : X86::MOVUPSrm;
+ Opc = HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
RC = &X86::VR128RegClass;
break;
case MVT::v2f64:
- if (Alignment >= 16)
- Opc = Subtarget->hasAVX() ? X86::VMOVAPDrm : X86::MOVAPDrm;
+ if (IsNonTemporal && Alignment >= 16 && HasSSE41)
+ Opc = HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
+ else if (Alignment >= 16)
+ Opc = HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
else
- Opc = Subtarget->hasAVX() ? X86::VMOVUPDrm : X86::MOVUPDrm;
+ Opc = HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
RC = &X86::VR128RegClass;
break;
case MVT::v4i32:
case MVT::v2i64:
case MVT::v8i16:
case MVT::v16i8:
- if (Alignment >= 16)
- Opc = Subtarget->hasAVX() ? X86::VMOVDQArm : X86::MOVDQArm;
+ if (IsNonTemporal && Alignment >= 16)
+ Opc = HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
+ else if (Alignment >= 16)
+ Opc = HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
else
- Opc = Subtarget->hasAVX() ? X86::VMOVDQUrm : X86::MOVDQUrm;
+ Opc = HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
RC = &X86::VR128RegClass;
break;
+ case MVT::v8f32:
+ assert(HasAVX);
+ if (IsNonTemporal && Alignment >= 32 && HasAVX2)
+ Opc = X86::VMOVNTDQAYrm;
+ else
+ Opc = (Alignment >= 32) ? X86::VMOVAPSYrm : X86::VMOVUPSYrm;
+ RC = &X86::VR256RegClass;
+ break;
+ case MVT::v4f64:
+ assert(HasAVX);
+ if (IsNonTemporal && Alignment >= 32 && HasAVX2)
+ Opc = X86::VMOVNTDQAYrm;
+ else
+ Opc = (Alignment >= 32) ? X86::VMOVAPDYrm : X86::VMOVUPDYrm;
+ RC = &X86::VR256RegClass;
+ break;
+ case MVT::v8i32:
+ case MVT::v4i64:
+ case MVT::v16i16:
+ case MVT::v32i8:
+ assert(HasAVX);
+ if (IsNonTemporal && Alignment >= 32 && HasAVX2)
+ Opc = X86::VMOVNTDQAYrm;
+ else
+ Opc = (Alignment >= 32) ? X86::VMOVDQAYrm : X86::VMOVDQUYrm;
+ RC = &X86::VR256RegClass;
+ break;
+ case MVT::v16f32:
+ assert(Subtarget->hasAVX512());
+ if (IsNonTemporal && Alignment >= 64)
+ Opc = X86::VMOVNTDQAZrm;
+ else
+ Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
+ RC = &X86::VR512RegClass;
+ break;
+ case MVT::v8f64:
+ assert(Subtarget->hasAVX512());
+ if (IsNonTemporal && Alignment >= 64)
+ Opc = X86::VMOVNTDQAZrm;
+ else
+ Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
+ RC = &X86::VR512RegClass;
+ break;
+ case MVT::v8i64:
+ case MVT::v16i32:
+ case MVT::v32i16:
+ case MVT::v64i8:
+ assert(Subtarget->hasAVX512());
+ // Note: There are a lot more choices based on type with AVX-512, but
+ // there's really no advantage when the load isn't masked.
+ if (IsNonTemporal && Alignment >= 64)
+ Opc = X86::VMOVNTDQAZrm;
+ else
+ Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
+ RC = &X86::VR512RegClass;
+ break;
}
ResultReg = createResultReg(RC);
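
Every vector width above follows the same three-way choice; a condensed restatement for the v4f32 case (opcode names from the X86 target headers, the helper itself is illustrative):

    static unsigned pickLoadOpcV4F32(bool NonTemporal, unsigned Alignment,
                                     bool HasSSE41, bool HasAVX) {
      if (NonTemporal && Alignment >= 16 && HasSSE41) // MOVNTDQA needs SSE4.1
        return HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
      if (Alignment >= 16)                            // aligned regular load
        return HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
      return HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm; // unaligned fallback
    }
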
@@ -507,12 +575,70 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
else
Opc = HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
} else
- Opc = Subtarget->hasAVX() ? X86::VMOVDQUmr : X86::MOVDQUmr;
+ Opc = HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
+ break;
+ case MVT::v8f32:
+ assert(HasAVX);
+ if (Aligned)
+ Opc = IsNonTemporal ? X86::VMOVNTPSYmr : X86::VMOVAPSYmr;
+ else
+ Opc = X86::VMOVUPSYmr;
+ break;
+ case MVT::v4f64:
+ assert(HasAVX);
+ if (Aligned) {
+ Opc = IsNonTemporal ? X86::VMOVNTPDYmr : X86::VMOVAPDYmr;
+ } else
+ Opc = X86::VMOVUPDYmr;
+ break;
+ case MVT::v8i32:
+ case MVT::v4i64:
+ case MVT::v16i16:
+ case MVT::v32i8:
+ assert(HasAVX);
+ if (Aligned)
+ Opc = IsNonTemporal ? X86::VMOVNTDQYmr : X86::VMOVDQAYmr;
+ else
+ Opc = X86::VMOVDQUYmr;
+ break;
+ case MVT::v16f32:
+ assert(Subtarget->hasAVX512());
+ if (Aligned)
+ Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
+ else
+ Opc = X86::VMOVUPSZmr;
+ break;
+ case MVT::v8f64:
+ assert(Subtarget->hasAVX512());
+ if (Aligned) {
+ Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
+ } else
+ Opc = X86::VMOVUPDZmr;
+ break;
+ case MVT::v8i64:
+ case MVT::v16i32:
+ case MVT::v32i16:
+ case MVT::v64i8:
+ assert(Subtarget->hasAVX512());
+ // Note: There are a lot more choices based on type with AVX-512, but
+ // there's really no advantage when the store isn't masked.
+ if (Aligned)
+ Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
+ else
+ Opc = X86::VMOVDQU64Zmr;
break;
}
+ const MCInstrDesc &Desc = TII.get(Opc);
+ // Some of the instructions in the previous switch use FR128 instead
+ // of FR32 for ValReg. Make sure the register we feed the instruction
+ // matches its register class constraints.
+  // Note: it is fine to copy from FR32 to FR128; these are the same
+  // registers behind the scenes, which is why this never triggered
+  // any bugs before.
+ ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc);
addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));
if (MMO)
MIB->addMemOperand(*FuncInfo.MF, MMO);
@@ -598,7 +724,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
AM.GV = GV;
// Allow the subtarget to classify the global.
- unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
+ unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
// If this reference is relative to the pic base, set it now.
if (isGlobalRelativeToPICBase(GVFlags)) {
@@ -831,9 +957,8 @@ redo_gep:
// our address and just match the value instead of completely failing.
AM = SavedAM;
- for (SmallVectorImpl<const Value *>::reverse_iterator
- I = GEPs.rbegin(), E = GEPs.rend(); I != E; ++I)
- if (handleConstantAddresses(*I, AM))
+ for (const Value *I : reverse(GEPs))
+ if (handleConstantAddresses(I, AM))
return true;
return false;
@@ -938,10 +1063,8 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
// base and index registers are unused.
assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
AM.Base.Reg = X86::RIP;
- } else if (Subtarget->isPICStyleStubPIC()) {
- AM.GVOpFlags = X86II::MO_PIC_BASE_OFFSET;
- } else if (Subtarget->isPICStyleGOT()) {
- AM.GVOpFlags = X86II::MO_GOTOFF;
+ } else {
+ AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr);
}
return true;
@@ -972,6 +1095,21 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
if (S->isAtomic())
return false;
+ const Value *PtrV = I->getOperand(1);
+ if (TLI.supportSwiftError()) {
+ // Swifterror values can come from either a function parameter with
+ // swifterror attribute or an alloca with swifterror attribute.
+ if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
+ if (Arg->hasSwiftErrorAttr())
+ return false;
+ }
+
+ if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
+ if (Alloca->isSwiftError())
+ return false;
+ }
+ }
+
const Value *Val = S->getValueOperand();
const Value *Ptr = S->getPointerOperand();
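
The same pair of checks reappears for loads later in this patch; the guard reduces to one predicate, sketched here with the real Argument/AllocaInst queries but a hypothetical helper name:

    // Sketch: FastISel punts any swifterror pointer back to SelectionDAG,
    // which models the value in a register rather than in memory.
    static bool isSwiftErrorPtr(const Value *P) {
      if (const auto *A = dyn_cast<Argument>(P))
        return A->hasSwiftErrorAttr();
      if (const auto *AI = dyn_cast<AllocaInst>(P))
        return AI->isSwiftError();
      return false;
    }
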
@@ -1002,6 +1140,10 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
if (!FuncInfo.CanLowerReturn)
return false;
+ if (TLI.supportSwiftError() &&
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ return false;
+
if (TLI.supportSplitCSR(FuncInfo.MF))
return false;
@@ -1009,14 +1151,14 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
if (CC != CallingConv::C &&
CC != CallingConv::Fast &&
CC != CallingConv::X86_FastCall &&
- CC != CallingConv::X86_64_SysV)
+ CC != CallingConv::X86_StdCall &&
+ CC != CallingConv::X86_ThisCall &&
+ CC != CallingConv::X86_64_SysV &&
+ CC != CallingConv::X86_64_Win64)
return false;
- if (Subtarget->isCallingConvWin64(CC))
- return false;
-
- // Don't handle popping bytes on return for now.
- if (X86MFInfo->getBytesToPopOnReturn() != 0)
+ // Don't handle popping bytes if they don't fit the ret's immediate.
+ if (!isUInt<16>(X86MFInfo->getBytesToPopOnReturn()))
return false;
// fastcc with -tailcallopt is intended to provide a guaranteed
@@ -1101,11 +1243,14 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
RetRegs.push_back(VA.getLocReg());
}
+  // The Swift calling convention does not require that we copy the sret
+  // argument into %rax/%eax for the return, and SRetReturnReg is not set
+  // for Swift.
+
// All x86 ABIs require that for returning structs by value we copy
// the sret argument into %rax/%eax (depending on ABI) for the return.
// We saved the argument into a virtual register in the entry block,
// so now we copy the value out and into %rax/%eax.
- if (F.hasStructRetAttr()) {
+ if (F.hasStructRetAttr() && CC != CallingConv::Swift) {
unsigned Reg = X86MFInfo->getSRetReturnReg();
assert(Reg &&
"SRetReturnReg should have been set in LowerFormalArguments()!");
@@ -1116,9 +1261,15 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
}
// Now emit the RET.
- MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
+ MachineInstrBuilder MIB;
+ if (X86MFInfo->getBytesToPopOnReturn()) {
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Subtarget->is64Bit() ? X86::RETIQ : X86::RETIL))
+ .addImm(X86MFInfo->getBytesToPopOnReturn());
+ } else {
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
+ }
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
MIB.addReg(RetRegs[i], RegState::Implicit);
return true;
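
The return emission above now handles callee-popped bytes; the opcode choice reduces to a small sketch (illustrative helper, real opcode names):

    static unsigned pickRetOpcode(bool Is64Bit, unsigned BytesToPop) {
      // BytesToPop was already checked to fit ret's 16-bit immediate.
      if (BytesToPop)
        return Is64Bit ? X86::RETIQ : X86::RETIL;
      return Is64Bit ? X86::RETQ : X86::RETL;
    }
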
@@ -1133,6 +1284,21 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
if (LI->isAtomic())
return false;
+ const Value *SV = I->getOperand(0);
+ if (TLI.supportSwiftError()) {
+ // Swifterror values can come from either a function parameter with
+ // swifterror attribute or an alloca with swifterror attribute.
+ if (const Argument *Arg = dyn_cast<Argument>(SV)) {
+ if (Arg->hasSwiftErrorAttr())
+ return false;
+ }
+
+ if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
+ if (Alloca->isSwiftError())
+ return false;
+ }
+ }
+
MVT VT;
if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
return false;
@@ -1204,8 +1370,8 @@ static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
}
}
-bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
- EVT VT, DebugLoc CurDbgLoc) {
+bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
+ const DebugLoc &CurDbgLoc) {
unsigned Op0Reg = getRegForValue(Op0);
if (Op0Reg == 0) return false;
@@ -1244,6 +1410,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
if (!isTypeLegal(I->getOperand(0)->getType(), VT))
return false;
+ if (I->getType()->isIntegerTy(1) && Subtarget->hasAVX512())
+ return false;
+
// Try to optimize or fold the cmp.
CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
unsigned ResultReg = 0;
@@ -2294,8 +2463,10 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// register class VR128 by method 'constrainOperandRegClass' which is
// directly called by 'fastEmitInst_ri'.
// Instruction VCVTPS2PHrr takes an extra immediate operand which is
- // used to provide rounding control.
- InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 0);
+ // used to provide rounding control: use MXCSR.RC, encoded as 0b100.
+ // It's consistent with the other FP instructions, which are usually
+ // controlled by MXCSR.
+ InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 4);
// Move the lower 32-bits of ResultReg to another register of class GR32.
ResultReg = createResultReg(&X86::GR32RegClass);
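
For reference, the imm8 of VCVTPS2PH selects rounding: values 0-3 pick a fixed mode, and bit 2 (value 4) defers to MXCSR.RC, which is what the call above passes. Enumerator names here are illustrative:

    enum CvtPs2PhRounding {
      RoundToNearest  = 0,
      RoundDown       = 1,
      RoundUp         = 2,
      RoundTowardZero = 3,
      UseMXCSR_RC     = 4 // bit 2 set: defer to MXCSR.RC, as the code above does
    };
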
@@ -2477,7 +2648,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
// is not generated by FastISel yet.
// FIXME: Update this code once tablegen can handle it.
- static const unsigned SqrtOpc[2][2] = {
+ static const uint16_t SqrtOpc[2][2] = {
{X86::SQRTSSr, X86::VSQRTSSr},
{X86::SQRTSDr, X86::VSQRTSDr}
};
@@ -2577,7 +2748,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
unsigned ResultReg = 0;
// Check if we have an immediate version.
if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
- static const unsigned Opc[2][4] = {
+ static const uint16_t Opc[2][4] = {
{ X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
{ X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
};
@@ -2607,9 +2778,9 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
// it manually.
if (BaseOpc == X86ISD::UMUL && !ResultReg) {
- static const unsigned MULOpc[] =
+ static const uint16_t MULOpc[] =
{ X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
- static const unsigned Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
+ static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
// First copy the first operand into RAX, which is an implicit input to
// the X86::MUL*r instruction.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -2618,7 +2789,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
} else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
- static const unsigned MULOpc[] =
+ static const uint16_t MULOpc[] =
{ X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
if (VT == MVT::i8) {
// Copy the first operand into AL, which is an implicit input to the
@@ -2671,7 +2842,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (!isTypeLegal(RetTy, VT))
return false;
- static const unsigned CvtOpc[2][2][2] = {
+ static const uint16_t CvtOpc[2][2][2] = {
{ { X86::CVTTSS2SIrr, X86::VCVTTSS2SIrr },
{ X86::CVTTSS2SI64rr, X86::VCVTTSS2SI64rr } },
{ { X86::CVTTSD2SIrr, X86::VCVTTSD2SIrr },
@@ -2742,6 +2913,8 @@ bool X86FastISel::fastLowerArguments() {
if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) ||
F->getAttributes().hasAttribute(Idx, Attribute::Nest))
return false;
@@ -2809,9 +2982,9 @@ bool X86FastISel::fastLowerArguments() {
return true;
}
-static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,
- CallingConv::ID CC,
- ImmutableCallSite *CS) {
+static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget,
+ CallingConv::ID CC,
+ ImmutableCallSite *CS) {
if (Subtarget->is64Bit())
return 0;
if (Subtarget->getTargetTriple().isOSMSVCRT())
@@ -2849,7 +3022,10 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
case CallingConv::C:
case CallingConv::Fast:
case CallingConv::WebKit_JS:
+ case CallingConv::Swift:
case CallingConv::X86_FastCall:
+ case CallingConv::X86_StdCall:
+ case CallingConv::X86_ThisCall:
case CallingConv::X86_64_Win64:
case CallingConv::X86_64_SysV:
break;
@@ -2873,10 +3049,9 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (CLI.CS && CLI.CS->hasInAllocaArgument())
return false;
- // Fast-isel doesn't know about callee-pop yet.
- if (X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
- TM.Options.GuaranteedTailCallOpt))
- return false;
+ for (auto Flag : CLI.OutFlags)
+ if (Flag.isSwiftError())
+ return false;
SmallVector<MVT, 16> OutVTs;
SmallVector<unsigned, 16> ArgRegs;
@@ -2964,6 +3139,10 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
case CCValAssign::SExt: {
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
"Unexpected extend");
+
+ if (ArgVT.SimpleTy == MVT::i1)
+ return false;
+
bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
ArgVT, ArgReg);
assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
@@ -2973,6 +3152,17 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
case CCValAssign::ZExt: {
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
"Unexpected extend");
+
+ // Handle zero-extension from i1 to i8, which is common.
+ if (ArgVT.SimpleTy == MVT::i1) {
+ // Set the high bits to zero.
+ ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg, /*TODO: Kill=*/false);
+ ArgVT = MVT::i8;
+
+ if (ArgReg == 0)
+ return false;
+ }
+
bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
ArgVT, ArgReg);
assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
@@ -3113,25 +3303,10 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
// See if we need any target-specific flags on the GV operand.
- unsigned char OpFlags = 0;
-
- // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
- // external symbols most go through the PLT in PIC mode. If the symbol
- // has hidden or protected visibility, or if it is static or local, then
- // we don't need to use the PLT - we can directly call it.
- if (Subtarget->isTargetELF() &&
- TM.getRelocationModel() == Reloc::PIC_ &&
- GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
- OpFlags = X86II::MO_PLT;
- } else if (Subtarget->isPICStyleStubAny() &&
- !GV->isStrongDefinitionForLinker() &&
- (!Subtarget->getTargetTriple().isMacOSX() ||
- Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
- // PC-relative references to external symbols should go through $stub,
- // unless we're building with the leopard linker or later, which
- // automatically synthesizes these stubs.
- OpFlags = X86II::MO_DARWIN_STUB;
- }
+ unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);
+  // Ignore the NonLazyBind attribute in FastISel.
+ if (OpFlags == X86II::MO_GOTPCREL)
+ OpFlags = 0;
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
if (Symbol)
@@ -3157,7 +3332,10 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
// Issue CALLSEQ_END
unsigned NumBytesForCalleeToPop =
- computeBytesPoppedByCallee(Subtarget, CC, CLI.CS);
+ X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
+ TM.Options.GuaranteedTailCallOpt)
+ ? NumBytes // Callee pops everything.
+ : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CS);
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
.addImm(NumBytes).addImm(NumBytesForCalleeToPop);
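
A hedged restatement of the CALLSEQ_END bookkeeping above: callee-pop conventions pop the whole argument area, otherwise at most the 4-byte sret pointer is popped (and never on 64-bit or MSVCRT targets). Helper name and parameters are illustrative:

    static unsigned calleePoppedBytes(bool CalleePopCC, unsigned NumBytes,
                                      unsigned SRetPopBytes /* 0 or 4 */) {
      // e.g. 32-bit stdcall with 12 bytes of args: CalleePopCC, returns 12.
      return CalleePopCC ? NumBytes : SRetPopBytes;
    }
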
@@ -3398,17 +3576,13 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
- unsigned char OpFlag = 0;
- if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
- OpFlag = X86II::MO_PIC_BASE_OFFSET;
+ unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr);
+ if (OpFlag == X86II::MO_PIC_BASE_OFFSET)
PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- } else if (Subtarget->isPICStyleGOT()) {
- OpFlag = X86II::MO_GOTOFF;
+ else if (OpFlag == X86II::MO_GOTOFF)
PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- } else if (Subtarget->isPICStyleRIPRel() &&
- TM.getCodeModel() == CodeModel::Small) {
+ else if (Subtarget->is64Bit() && TM.getCodeModel() == CodeModel::Small)
PICBase = X86::RIP;
- }
// Create the load from the constant pool.
unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
@@ -3572,7 +3746,7 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
AM.getFullAddress(AddrOps);
MachineInstr *Result = XII.foldMemoryOperandImpl(
- *FuncInfo.MF, MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
+ *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
/*AllowCommute=*/true);
if (!Result)
return false;
diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp
new file mode 100644
index 0000000000000..90e758dc2e026
--- /dev/null
+++ b/lib/Target/X86/X86FixupBWInsts.cpp
@@ -0,0 +1,371 @@
+//===-- X86FixupBWInsts.cpp - Fixup Byte or Word instructions -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines the pass that looks through the machine instructions
+/// late in the compilation, and finds byte or word instructions that
+/// can be profitably replaced with 32 bit instructions that give equivalent
+/// results for the bits of the results that are used. There are two possible
+/// reasons to do this.
+///
+/// One reason is to avoid false-dependences on the upper portions
+/// of the registers. Only instructions that have a destination register
+/// which is not in any of the source registers can be affected by this.
+/// Any instruction where one of the source registers is also the destination
+/// register is unaffected, because it has a true dependence on the source
+/// register already. So, this consideration primarily affects load
+/// instructions and register-to-register moves. It would
+/// seem like cmov(s) would also be affected, but because of the way cmov is
+/// really implemented by most machines as reading both the destination and
+/// source registers, and then "merging" the two based on a condition,
+/// it really already should be considered as having a true dependence on the
+/// destination register as well.
+///
+/// The other reason to do this is for potential code size savings. Word
+/// operations need an extra override byte compared to their 32 bit
+/// versions. So this can convert many word operations to their larger
+/// size, saving a byte in encoding. This could introduce partial register
+/// dependences where none existed however. As an example take:
+/// orw ax, $0x1000
+/// addw ax, $3
+/// now if this were to get transformed into
+/// orw ax, $0x1000
+/// addl eax, $3
+/// because the addl has a shorter encoding than the addw, this would introduce
+/// a use of a register that was only partially written earlier. On older
+/// Intel processors this can be quite a performance penalty, so this should
+/// probably only be done when it can be proven that a new partial dependence
+/// wouldn't be created, or when you know a newer processor is being
+/// targeted, or when optimizing for minimum code size.
+///
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+using namespace llvm;
+
+#define FIXUPBW_DESC "X86 Byte/Word Instruction Fixup"
+#define FIXUPBW_NAME "x86-fixup-bw-insts"
+
+#define DEBUG_TYPE FIXUPBW_NAME
+
+// Option to allow this optimization pass to have fine-grained control.
+// This is turned on by default; the flag exists so the transformation can be
+// disabled, e.g. to keep the output of existing lit tests stable.
+static cl::opt<bool>
+ FixupBWInsts("fixup-byte-word-insts",
+ cl::desc("Change byte and word instructions to larger sizes"),
+ cl::init(true), cl::Hidden);
+
+namespace {
+class FixupBWInstPass : public MachineFunctionPass {
+ /// Loop over all of the instructions in the basic block replacing applicable
+ /// byte or word instructions with better alternatives.
+ void processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
+
+ /// This sets the \p SuperDestReg to the 32 bit super reg of the original
+ /// destination register of the MachineInstr passed in. It returns true if
+ /// that super register is dead just prior to \p OrigMI, and false if not.
+ bool getSuperRegDestIfDead(MachineInstr *OrigMI,
+ unsigned &SuperDestReg) const;
+
+ /// Change the MachineInstr \p MI into the equivalent extending load to 32 bit
+ /// register if it is safe to do so. Return the replacement instruction if
+ /// OK, otherwise return nullptr.
+ MachineInstr *tryReplaceLoad(unsigned New32BitOpcode, MachineInstr *MI) const;
+
+ /// Change the MachineInstr \p MI into the equivalent 32-bit copy if it is
+ /// safe to do so. Return the replacement instruction if OK, otherwise return
+ /// nullptr.
+ MachineInstr *tryReplaceCopy(MachineInstr *MI) const;
+
+  /// Change the MachineInstr \p MI into an equivalent 32 bit instruction if
+  /// possible. Return the replacement instruction if OK, return nullptr
+  /// otherwise. Set \p WasCandidate to true or false depending on whether
+  /// the MI was a candidate for this sort of transformation.
+ MachineInstr *tryReplaceInstr(MachineInstr *MI, MachineBasicBlock &MBB,
+ bool &WasCandidate) const;
+public:
+ static char ID;
+
+ const char *getPassName() const override {
+ return FIXUPBW_DESC;
+ }
+
+ FixupBWInstPass() : MachineFunctionPass(ID) {
+ initializeFixupBWInstPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineLoopInfo>(); // Machine loop info is used to
+ // guide some heuristics.
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ /// Loop over all of the basic blocks, replacing byte and word instructions by
+ /// equivalent 32 bit instructions where performance or code size can be
+ /// improved.
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
+private:
+ MachineFunction *MF;
+
+ /// Machine instruction info used throughout the class.
+ const X86InstrInfo *TII;
+
+ /// Local member for function's OptForSize attribute.
+ bool OptForSize;
+
+  /// Machine loop info used for guiding some heuristics.
+ MachineLoopInfo *MLI;
+
+ /// Register Liveness information after the current instruction.
+ LivePhysRegs LiveRegs;
+};
+char FixupBWInstPass::ID = 0;
+}
+
+INITIALIZE_PASS(FixupBWInstPass, FIXUPBW_NAME, FIXUPBW_DESC, false, false)
+
+FunctionPass *llvm::createX86FixupBWInsts() { return new FixupBWInstPass(); }
+
+bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
+ if (!FixupBWInsts || skipFunction(*MF.getFunction()))
+ return false;
+
+ this->MF = &MF;
+ TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
+ OptForSize = MF.getFunction()->optForSize();
+ MLI = &getAnalysis<MachineLoopInfo>();
+ LiveRegs.init(&TII->getRegisterInfo());
+
+ DEBUG(dbgs() << "Start X86FixupBWInsts\n";);
+
+ // Process all basic blocks.
+ for (auto &MBB : MF)
+ processBasicBlock(MF, MBB);
+
+ DEBUG(dbgs() << "End X86FixupBWInsts\n";);
+
+ return true;
+}
+
+// TODO: This method of analysis can miss some legal cases, because the
+// super-register could be live into the address expression for a memory
+// reference for the instruction, and still be killed/last used by the
+// instruction. However, the existing query interfaces don't seem to
+// easily allow that to be checked.
+//
+// What we'd really like to know is whether after OrigMI, the
+// only portion of SuperDestReg that is alive is the portion that
+// was the destination register of OrigMI.
+bool FixupBWInstPass::getSuperRegDestIfDead(MachineInstr *OrigMI,
+ unsigned &SuperDestReg) const {
+ auto *TRI = &TII->getRegisterInfo();
+
+ unsigned OrigDestReg = OrigMI->getOperand(0).getReg();
+ SuperDestReg = getX86SubSuperRegister(OrigDestReg, 32);
+
+ const auto SubRegIdx = TRI->getSubRegIndex(SuperDestReg, OrigDestReg);
+
+ // Make sure that the sub-register that this instruction has as its
+ // destination is the lowest order sub-register of the super-register.
+ // If it isn't, then the register isn't really dead even if the
+ // super-register is considered dead.
+ if (SubRegIdx == X86::sub_8bit_hi)
+ return false;
+
+ if (LiveRegs.contains(SuperDestReg))
+ return false;
+
+ if (SubRegIdx == X86::sub_8bit) {
+ // In the case of byte registers, we also have to check that the upper
+ // byte register is also dead. That is considered to be independent of
+ // whether the super-register is dead.
+ unsigned UpperByteReg =
+ getX86SubSuperRegister(SuperDestReg, 8, /*High=*/true);
+
+ if (LiveRegs.contains(UpperByteReg))
+ return false;
+ }
+
+ return true;
+}
+
+MachineInstr *FixupBWInstPass::tryReplaceLoad(unsigned New32BitOpcode,
+ MachineInstr *MI) const {
+ unsigned NewDestReg;
+
+ // We are going to try to rewrite this load to a larger zero-extending
+ // load. This is safe if all portions of the 32 bit super-register
+ // of the original destination register, except for the original destination
+ // register are dead. getSuperRegDestIfDead checks that.
+ if (!getSuperRegDestIfDead(MI, NewDestReg))
+ return nullptr;
+
+ // Safe to change the instruction.
+ MachineInstrBuilder MIB =
+ BuildMI(*MF, MI->getDebugLoc(), TII->get(New32BitOpcode), NewDestReg);
+
+ unsigned NumArgs = MI->getNumOperands();
+ for (unsigned i = 1; i < NumArgs; ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ return MIB;
+}
+
+MachineInstr *FixupBWInstPass::tryReplaceCopy(MachineInstr *MI) const {
+ assert(MI->getNumExplicitOperands() == 2);
+ auto &OldDest = MI->getOperand(0);
+ auto &OldSrc = MI->getOperand(1);
+
+ unsigned NewDestReg;
+ if (!getSuperRegDestIfDead(MI, NewDestReg))
+ return nullptr;
+
+ unsigned NewSrcReg = getX86SubSuperRegister(OldSrc.getReg(), 32);
+
+ // This is only correct if we access the same subregister index: otherwise,
+ // we could try to replace "movb %ah, %al" with "movl %eax, %eax".
+ auto *TRI = &TII->getRegisterInfo();
+ if (TRI->getSubRegIndex(NewSrcReg, OldSrc.getReg()) !=
+ TRI->getSubRegIndex(NewDestReg, OldDest.getReg()))
+ return nullptr;
+
+ // Safe to change the instruction.
+ // Don't set src flags, as we don't know if we're also killing the superreg.
+ // However, the superregister might not be defined; make it explicit that
+ // we don't care about the higher bits by reading it as Undef, and adding
+ // an imp-use on the original subregister.
+ MachineInstrBuilder MIB =
+ BuildMI(*MF, MI->getDebugLoc(), TII->get(X86::MOV32rr), NewDestReg)
+ .addReg(NewSrcReg, RegState::Undef)
+ .addReg(OldSrc.getReg(), RegState::Implicit);
+
+ // Drop imp-defs/uses that would be redundant with the new def/use.
+ for (auto &Op : MI->implicit_operands())
+ if (Op.getReg() != (Op.isDef() ? NewDestReg : NewSrcReg))
+ MIB.addOperand(Op);
+
+ return MIB;
+}
+
+MachineInstr *FixupBWInstPass::tryReplaceInstr(
+ MachineInstr *MI, MachineBasicBlock &MBB,
+ bool &WasCandidate) const {
+ MachineInstr *NewMI = nullptr;
+ WasCandidate = false;
+
+ // See if this is an instruction of the type we are currently looking for.
+ switch (MI->getOpcode()) {
+
+ case X86::MOV8rm:
+ // Only replace 8 bit loads with the zero extending versions if
+ // in an inner most loop and not optimizing for size. This takes
+    // in an innermost loop and not optimizing for size. This takes
+ if (MachineLoop *ML = MLI->getLoopFor(&MBB)) {
+ if (ML->begin() == ML->end() && !OptForSize) {
+ NewMI = tryReplaceLoad(X86::MOVZX32rm8, MI);
+ WasCandidate = true;
+ }
+ }
+ break;
+
+ case X86::MOV16rm:
+ // Always try to replace 16 bit load with 32 bit zero extending.
+ // Code size is the same, and there is sometimes a perf advantage
+ // from eliminating a false dependence on the upper portion of
+ // the register.
+ NewMI = tryReplaceLoad(X86::MOVZX32rm16, MI);
+ WasCandidate = true;
+ break;
+
+ case X86::MOV8rr:
+ case X86::MOV16rr:
+ // Always try to replace 8/16 bit copies with a 32 bit copy.
+ // Code size is either less (16) or equal (8), and there is sometimes a
+ // perf advantage from eliminating a false dependence on the upper portion
+ // of the register.
+ NewMI = tryReplaceCopy(MI);
+ WasCandidate = true;
+ break;
+
+ default:
+ // nothing to do here.
+ break;
+ }
+
+ return NewMI;
+}
+
+void FixupBWInstPass::processBasicBlock(MachineFunction &MF,
+ MachineBasicBlock &MBB) {
+
+ // This algorithm doesn't delete the instructions it is replacing
+ // right away. By leaving the existing instructions in place, the
+ // register liveness information doesn't change, and this makes the
+ // analysis that goes on be better than if the replaced instructions
+ // were immediately removed.
+ //
+ // This algorithm always creates a replacement instruction
+ // and notes that and the original in a data structure, until the
+ // whole BB has been analyzed. This keeps the replacement instructions
+ // from making it seem as if the larger register might be live.
+ SmallVector<std::pair<MachineInstr *, MachineInstr *>, 8> MIReplacements;
+
+ // Start computing liveness for this block. We iterate from the end to be able
+ // to update this for each instruction.
+ LiveRegs.clear();
+ // We run after PEI, so we need to AddPristinesAndCSRs.
+ LiveRegs.addLiveOuts(MBB);
+
+ bool WasCandidate = false;
+
+ for (auto I = MBB.rbegin(); I != MBB.rend(); ++I) {
+ MachineInstr *MI = &*I;
+
+ MachineInstr *NewMI = tryReplaceInstr(MI, MBB, WasCandidate);
+
+ // Add this to replacements if it was a candidate, even if NewMI is
+ // nullptr. We will revisit that in a bit.
+ if (WasCandidate) {
+ MIReplacements.push_back(std::make_pair(MI, NewMI));
+ }
+
+ // We're done with this instruction, update liveness for the next one.
+ LiveRegs.stepBackward(*MI);
+ }
+
+ while (!MIReplacements.empty()) {
+ MachineInstr *MI = MIReplacements.back().first;
+ MachineInstr *NewMI = MIReplacements.back().second;
+ MIReplacements.pop_back();
+ if (NewMI) {
+ MBB.insert(MI, NewMI);
+ MBB.erase(MI);
+ }
+ }
+}
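
The safety rule the new pass enforces can be distilled as follows; the boolean parameters are hypothetical stand-ins for the LivePhysRegs queries made in getSuperRegDestIfDead:

    // Widening a byte def (e.g. MOV8rm %al) to a 32-bit def (MOVZX32rm8 %eax)
    // clobbers the whole super-register, so it is only safe when both the
    // super-register and, for low-byte defs, the high byte are already dead.
    static bool canWidenByteDef(bool DefsHighByte, bool SuperRegLive,
                                bool HighByteLive) {
      if (DefsHighByte) // %ah and friends: not the low subregister, give up
        return false;
      return !SuperRegLive && !HighByteLive;
    }
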
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index 1dd69e8a6a5f8..013ee249a60fe 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -92,6 +92,12 @@ public:
/// if needed and when possible.
bool runOnMachineFunction(MachineFunction &MF) override;
+ // This pass runs after regalloc and doesn't support VReg operands.
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
private:
MachineFunction *MF;
const X86InstrInfo *TII; // Machine instruction info.
@@ -104,22 +110,22 @@ char FixupLEAPass::ID = 0;
MachineInstr *
FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI) const {
- MachineInstr *MI = MBBI;
- MachineInstr *NewMI;
- switch (MI->getOpcode()) {
+ MachineInstr &MI = *MBBI;
+ switch (MI.getOpcode()) {
case X86::MOV32rr:
case X86::MOV64rr: {
- const MachineOperand &Src = MI->getOperand(1);
- const MachineOperand &Dest = MI->getOperand(0);
- NewMI = BuildMI(*MF, MI->getDebugLoc(),
- TII->get(MI->getOpcode() == X86::MOV32rr ? X86::LEA32r
- : X86::LEA64r))
- .addOperand(Dest)
- .addOperand(Src)
- .addImm(1)
- .addReg(0)
- .addImm(0)
- .addReg(0);
+ const MachineOperand &Src = MI.getOperand(1);
+ const MachineOperand &Dest = MI.getOperand(0);
+ MachineInstr *NewMI =
+ BuildMI(*MF, MI.getDebugLoc(),
+ TII->get(MI.getOpcode() == X86::MOV32rr ? X86::LEA32r
+ : X86::LEA64r))
+ .addOperand(Dest)
+ .addOperand(Src)
+ .addImm(1)
+ .addReg(0)
+ .addImm(0)
+ .addReg(0);
MFI->insert(MBBI, NewMI); // Insert the new inst
return NewMI;
}
@@ -135,7 +141,7 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
- if (!MI->getOperand(2).isImm()) {
+ if (!MI.getOperand(2).isImm()) {
// convertToThreeAddress will call getImm()
// which requires isImm() to be true
return nullptr;
@@ -143,19 +149,22 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
break;
case X86::ADD16rr:
case X86::ADD16rr_DB:
- if (MI->getOperand(1).getReg() != MI->getOperand(2).getReg()) {
+ if (MI.getOperand(1).getReg() != MI.getOperand(2).getReg()) {
// if src1 != src2, then convertToThreeAddress will
// need to create a Virtual register, which we cannot do
// after register allocation.
return nullptr;
}
}
- return TII->convertToThreeAddress(MFI, MBBI, nullptr);
+ return TII->convertToThreeAddress(MFI, MI, nullptr);
}
FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
+ if (skipFunction(*Func.getFunction()))
+ return false;
+
MF = &Func;
const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>();
OptIncDec = !ST.slowIncDec() || Func.getFunction()->optForMinSize();
@@ -178,10 +187,10 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
FixupLEAPass::RegUsageState
FixupLEAPass::usesRegister(MachineOperand &p, MachineBasicBlock::iterator I) {
RegUsageState RegUsage = RU_NotUsed;
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
- for (unsigned int i = 0; i < MI->getNumOperands(); ++i) {
- MachineOperand &opnd = MI->getOperand(i);
+ for (unsigned int i = 0; i < MI.getNumOperands(); ++i) {
+ MachineOperand &opnd = MI.getOperand(i);
if (opnd.isReg() && opnd.getReg() == p.getReg()) {
if (opnd.isDef())
return RU_Write;
@@ -227,10 +236,10 @@ FixupLEAPass::searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I,
return CurInst;
}
InstrDistance += TII->getInstrLatency(
- MF->getSubtarget().getInstrItineraryData(), CurInst);
+ MF->getSubtarget().getInstrItineraryData(), *CurInst);
Found = getPreviousInstr(CurInst, MFI);
}
- return nullptr;
+ return MachineBasicBlock::iterator();
}
static inline bool isLEA(const int opcode) {
@@ -241,28 +250,28 @@ static inline bool isLEA(const int opcode) {
 /// isLEASimpleIncOrDec - Does this LEA have one of these forms:
/// lea %reg, 1(%reg)
/// lea %reg, -1(%reg)
-static inline bool isLEASimpleIncOrDec(MachineInstr *LEA) {
- unsigned SrcReg = LEA->getOperand(1 + X86::AddrBaseReg).getReg();
- unsigned DstReg = LEA->getOperand(0).getReg();
+static inline bool isLEASimpleIncOrDec(MachineInstr &LEA) {
+ unsigned SrcReg = LEA.getOperand(1 + X86::AddrBaseReg).getReg();
+ unsigned DstReg = LEA.getOperand(0).getReg();
unsigned AddrDispOp = 1 + X86::AddrDisp;
return SrcReg == DstReg &&
- LEA->getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
- LEA->getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
- LEA->getOperand(AddrDispOp).isImm() &&
- (LEA->getOperand(AddrDispOp).getImm() == 1 ||
- LEA->getOperand(AddrDispOp).getImm() == -1);
+ LEA.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
+ LEA.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
+ LEA.getOperand(AddrDispOp).isImm() &&
+ (LEA.getOperand(AddrDispOp).getImm() == 1 ||
+ LEA.getOperand(AddrDispOp).getImm() == -1);
}
bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) const {
- MachineInstr *MI = I;
- int Opcode = MI->getOpcode();
+ MachineInstr &MI = *I;
+ int Opcode = MI.getOpcode();
if (!isLEA(Opcode))
return false;
if (isLEASimpleIncOrDec(MI) && TII->isSafeToClobberEFLAGS(*MFI, I)) {
int NewOpcode;
- bool isINC = MI->getOperand(4).getImm() == 1;
+ bool isINC = MI.getOperand(4).getImm() == 1;
switch (Opcode) {
case X86::LEA16r:
NewOpcode = isINC ? X86::INC16r : X86::DEC16r;
@@ -277,9 +286,9 @@ bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
}
MachineInstr *NewMI =
- BuildMI(*MFI, I, MI->getDebugLoc(), TII->get(NewOpcode))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1));
+ BuildMI(*MFI, I, MI.getDebugLoc(), TII->get(NewOpcode))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1));
MFI->erase(I);
I = static_cast<MachineBasicBlock::iterator>(NewMI);
return true;
@@ -290,17 +299,16 @@ bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
void FixupLEAPass::processInstruction(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
// Process a load, store, or LEA instruction.
- MachineInstr *MI = I;
- int opcode = MI->getOpcode();
- const MCInstrDesc &Desc = MI->getDesc();
- int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags, opcode);
+ MachineInstr &MI = *I;
+ const MCInstrDesc &Desc = MI.getDesc();
+ int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags);
if (AddrOffset >= 0) {
AddrOffset += X86II::getOperandBias(Desc);
- MachineOperand &p = MI->getOperand(AddrOffset + X86::AddrBaseReg);
+ MachineOperand &p = MI.getOperand(AddrOffset + X86::AddrBaseReg);
if (p.isReg() && p.getReg() != X86::ESP) {
seekLEAFixup(p, I, MFI);
}
- MachineOperand &q = MI->getOperand(AddrOffset + X86::AddrIndexReg);
+ MachineOperand &q = MI.getOperand(AddrOffset + X86::AddrIndexReg);
if (q.isReg() && q.getReg() != X86::ESP) {
seekLEAFixup(q, I, MFI);
}
@@ -311,7 +319,7 @@ void FixupLEAPass::seekLEAFixup(MachineOperand &p,
MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
MachineBasicBlock::iterator MBI = searchBackwards(p, I, MFI);
- if (MBI) {
+ if (MBI != MachineBasicBlock::iterator()) {
MachineInstr *NewMI = postRAConvertToLEA(MFI, MBI);
if (NewMI) {
++NumLEAs;
@@ -328,19 +336,19 @@ void FixupLEAPass::seekLEAFixup(MachineOperand &p,
void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
- MachineInstr *MI = I;
- const int opcode = MI->getOpcode();
+ MachineInstr &MI = *I;
+ const int opcode = MI.getOpcode();
if (!isLEA(opcode))
return;
- if (MI->getOperand(5).getReg() != 0 || !MI->getOperand(4).isImm() ||
+ if (MI.getOperand(5).getReg() != 0 || !MI.getOperand(4).isImm() ||
!TII->isSafeToClobberEFLAGS(*MFI, I))
return;
- const unsigned DstR = MI->getOperand(0).getReg();
- const unsigned SrcR1 = MI->getOperand(1).getReg();
- const unsigned SrcR2 = MI->getOperand(3).getReg();
+ const unsigned DstR = MI.getOperand(0).getReg();
+ const unsigned SrcR1 = MI.getOperand(1).getReg();
+ const unsigned SrcR2 = MI.getOperand(3).getReg();
if ((SrcR1 == 0 || SrcR1 != DstR) && (SrcR2 == 0 || SrcR2 != DstR))
return;
- if (MI->getOperand(2).getImm() > 1)
+ if (MI.getOperand(2).getImm() > 1)
return;
int addrr_opcode, addri_opcode;
switch (opcode) {
@@ -363,12 +371,12 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
DEBUG(dbgs() << "FixLEA: Replaced by: ";);
MachineInstr *NewMI = nullptr;
- const MachineOperand &Dst = MI->getOperand(0);
+ const MachineOperand &Dst = MI.getOperand(0);
// Make ADD instruction for two registers writing to LEA's destination
if (SrcR1 != 0 && SrcR2 != 0) {
- const MachineOperand &Src1 = MI->getOperand(SrcR1 == DstR ? 1 : 3);
- const MachineOperand &Src2 = MI->getOperand(SrcR1 == DstR ? 3 : 1);
- NewMI = BuildMI(*MF, MI->getDebugLoc(), TII->get(addrr_opcode))
+ const MachineOperand &Src1 = MI.getOperand(SrcR1 == DstR ? 1 : 3);
+ const MachineOperand &Src2 = MI.getOperand(SrcR1 == DstR ? 3 : 1);
+ NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addrr_opcode))
.addOperand(Dst)
.addOperand(Src1)
.addOperand(Src2);
@@ -376,12 +384,12 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
DEBUG(NewMI->dump(););
}
// Make ADD instruction for immediate
- if (MI->getOperand(4).getImm() != 0) {
- const MachineOperand &SrcR = MI->getOperand(SrcR1 == DstR ? 1 : 3);
- NewMI = BuildMI(*MF, MI->getDebugLoc(), TII->get(addri_opcode))
+ if (MI.getOperand(4).getImm() != 0) {
+ const MachineOperand &SrcR = MI.getOperand(SrcR1 == DstR ? 1 : 3);
+ NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addri_opcode))
.addOperand(Dst)
.addOperand(SrcR)
- .addImm(MI->getOperand(4).getImm());
+ .addImm(MI.getOperand(4).getImm());
MFI->insert(I, NewMI);
DEBUG(NewMI->dump(););
}
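
The operand arithmetic in processInstruction and isLEASimpleIncOrDec (1 + X86::AddrBaseReg and friends) is easy to misread, so here is a small standalone sketch, assuming only the X86::Addr* operand offsets from X86BaseInfo.h and llvm::raw_ostream; the helper name is invented and the block is not part of the patch.

  // Sketch only: dump the memory-style operands of an LEA. Operand 0 is the
  // destination register; the five address operands follow it.
  static void printLEAAddress(const MachineInstr &LEA, raw_ostream &OS) {
    const unsigned AddrBase = 1; // first address operand
    OS << "base " << LEA.getOperand(AddrBase + X86::AddrBaseReg).getReg()
       << " scale " << LEA.getOperand(AddrBase + X86::AddrScaleAmt).getImm()
       << " index " << LEA.getOperand(AddrBase + X86::AddrIndexReg).getReg()
       << " segment " << LEA.getOperand(AddrBase + X86::AddrSegmentReg).getReg();
    const MachineOperand &Disp = LEA.getOperand(AddrBase + X86::AddrDisp);
    if (Disp.isImm()) // the displacement can also be a symbolic operand
      OS << " disp " << Disp.getImm();
    OS << "\n";
  }

With this layout in mind, isLEASimpleIncOrDec is just "base equals dest, no index, no segment, displacement +1 or -1", which is exactly the form that can be turned into INC or DEC.
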
diff --git a/lib/Target/X86/X86FixupSetCC.cpp b/lib/Target/X86/X86FixupSetCC.cpp
new file mode 100644
index 0000000000000..fb317da953551
--- /dev/null
+++ b/lib/Target/X86/X86FixupSetCC.cpp
@@ -0,0 +1,186 @@
+//===---- X86FixupSetCC.cpp - fix zero-extension of setcc patterns --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass that fixes zero-extension of setcc patterns.
+// X86 setcc instructions are modeled to have no input arguments, and a single
+// GR8 output argument. This is consistent with other similar instructions
+// (e.g. movb), but means it is impossible to directly generate a setcc into
+// the lower GR8 of a specified GR32.
+// This means that ISel must select (zext (setcc)) into something like
+// seta %al; movzbl %al, %eax.
+// Unfortunately, this can cause a stall due to the partial register write
+// performed by the setcc. Instead, we can use:
+// xor %eax, %eax; seta %al
+// This both avoids the stall and is shorter to encode.
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-fixup-setcc"
+
+STATISTIC(NumSubstZexts, "Number of setcc + zext pairs substituted");
+
+namespace {
+class X86FixupSetCCPass : public MachineFunctionPass {
+public:
+ X86FixupSetCCPass() : MachineFunctionPass(ID) {}
+
+ const char *getPassName() const override { return "X86 Fixup SetCC"; }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+ // Find the preceding instruction that imp-defs eflags.
+ MachineInstr *findFlagsImpDef(MachineBasicBlock *MBB,
+ MachineBasicBlock::reverse_iterator MI);
+
+ // Return true if MI imp-uses eflags.
+ bool impUsesFlags(MachineInstr *MI);
+
+ // Return true if this is the opcode of a SetCC instruction with a register
+ // output.
+ bool isSetCCr(unsigned Opcode);
+
+ MachineRegisterInfo *MRI;
+ const X86InstrInfo *TII;
+
+ enum { SearchBound = 16 };
+
+ static char ID;
+};
+
+char X86FixupSetCCPass::ID = 0;
+}
+
+FunctionPass *llvm::createX86FixupSetCC() { return new X86FixupSetCCPass(); }
+
+bool X86FixupSetCCPass::isSetCCr(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ return false;
+ case X86::SETOr:
+ case X86::SETNOr:
+ case X86::SETBr:
+ case X86::SETAEr:
+ case X86::SETEr:
+ case X86::SETNEr:
+ case X86::SETBEr:
+ case X86::SETAr:
+ case X86::SETSr:
+ case X86::SETNSr:
+ case X86::SETPr:
+ case X86::SETNPr:
+ case X86::SETLr:
+ case X86::SETGEr:
+ case X86::SETLEr:
+ case X86::SETGr:
+ return true;
+ }
+}
+
+// We expect the instruction *immediately* before the setcc to imp-def
+// EFLAGS (because of scheduling glue). To make this less brittle w.r.t.
+// scheduling, look backwards until we hit the beginning of the
+// basic-block, or a small bound (to avoid quadratic behavior).
+MachineInstr *
+X86FixupSetCCPass::findFlagsImpDef(MachineBasicBlock *MBB,
+ MachineBasicBlock::reverse_iterator MI) {
+ auto MBBStart = MBB->instr_rend();
+ for (int i = 0; (i < SearchBound) && (MI != MBBStart); ++i, ++MI)
+ for (auto &Op : MI->implicit_operands())
+ if ((Op.getReg() == X86::EFLAGS) && (Op.isDef()))
+ return &*MI;
+
+ return nullptr;
+}
+
+bool X86FixupSetCCPass::impUsesFlags(MachineInstr *MI) {
+ for (auto &Op : MI->implicit_operands())
+ if ((Op.getReg() == X86::EFLAGS) && (Op.isUse()))
+ return true;
+
+ return false;
+}
+
+bool X86FixupSetCCPass::runOnMachineFunction(MachineFunction &MF) {
+ bool Changed = false;
+ MRI = &MF.getRegInfo();
+ TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
+
+ SmallVector<MachineInstr*, 4> ToErase;
+
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ // Find a setcc that is used by a zext.
+ // This doesn't have to be the only use; the transformation is safe
+ // regardless.
+ if (!isSetCCr(MI.getOpcode()))
+ continue;
+
+ MachineInstr *ZExt = nullptr;
+ for (auto &Use : MRI->use_instructions(MI.getOperand(0).getReg()))
+ if (Use.getOpcode() == X86::MOVZX32rr8)
+ ZExt = &Use;
+
+ if (!ZExt)
+ continue;
+
+ // Find the preceding instruction that imp-defs eflags.
+ MachineInstr *FlagsDefMI = findFlagsImpDef(
+ MI.getParent(), MachineBasicBlock::reverse_iterator(&MI));
+ if (!FlagsDefMI)
+ continue;
+
+ // We'd like to put something that clobbers eflags directly before
+ // FlagsDefMI. This can't hurt anything after FlagsDefMI, because
+ // it, itself, by definition, clobbers eflags. But it may happen that
+ // FlagsDefMI also *uses* eflags, in which case the transformation is
+ // invalid.
+ if (impUsesFlags(FlagsDefMI))
+ continue;
+
+ ++NumSubstZexts;
+ Changed = true;
+
+ // On 32-bit, we need to be careful to force an ABCD register.
+ const TargetRegisterClass *RC = MF.getSubtarget<X86Subtarget>().is64Bit()
+ ? &X86::GR32RegClass
+ : &X86::GR32_ABCDRegClass;
+ unsigned ZeroReg = MRI->createVirtualRegister(RC);
+ unsigned InsertReg = MRI->createVirtualRegister(RC);
+
+ // Initialize a register with 0. This must go before the eflags def
+ BuildMI(MBB, FlagsDefMI, MI.getDebugLoc(), TII->get(X86::MOV32r0),
+ ZeroReg);
+
+ // X86 setcc only takes an output GR8, so fake a GR32 input by inserting
+ // the setcc result into the low byte of the zeroed register.
+ BuildMI(*ZExt->getParent(), ZExt, ZExt->getDebugLoc(),
+ TII->get(X86::INSERT_SUBREG), InsertReg)
+ .addReg(ZeroReg)
+ .addReg(MI.getOperand(0).getReg())
+ .addImm(X86::sub_8bit);
+ MRI->replaceRegWith(ZExt->getOperand(0).getReg(), InsertReg);
+ ToErase.push_back(ZExt);
+ }
+ }
+
+ for (auto &I : ToErase)
+ I->eraseFromParent();
+
+ return Changed;
+}
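
For orientation, a minimal source-level example of the pattern this pass targets; the function is invented for illustration, and the assembly in the comments is the shape described in the file header rather than a guaranteed output of any particular compiler version.

  // Sketch only: an unsigned comparison whose boolean result is widened to int.
  // Before this pass the selected code is roughly
  //   cmpl %esi, %edi ; seta %al ; movzbl %al, %eax
  // and after it
  //   xorl %eax, %eax ; cmpl %esi, %edi ; seta %al
  int isAbove(unsigned a, unsigned b) {
    return a > b;
  }
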
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index 97bb8ab653a6c..55c1bff2bc18d 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -76,6 +76,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override { return "X86 FP Stackifier"; }
private:
@@ -222,7 +227,8 @@ namespace {
++NumFXCH;
}
- void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) {
+ void duplicateToTop(unsigned RegNo, unsigned AsReg,
+ MachineBasicBlock::iterator I) {
DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
unsigned STReg = getSTReg(RegNo);
pushReg(AsReg); // New register on top of stack
@@ -257,6 +263,7 @@ namespace {
bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
void handleCall(MachineBasicBlock::iterator &I);
+ void handleReturn(MachineBasicBlock::iterator &I);
void handleZeroArgFP(MachineBasicBlock::iterator &I);
void handleOneArgFP(MachineBasicBlock::iterator &I);
void handleOneArgFPRW(MachineBasicBlock::iterator &I);
@@ -266,9 +273,9 @@ namespace {
void handleSpecialFP(MachineBasicBlock::iterator &I);
// Check if a COPY instruction is using FP registers.
- static bool isFPCopy(MachineInstr *MI) {
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned SrcReg = MI->getOperand(1).getReg();
+ static bool isFPCopy(MachineInstr &MI) {
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned SrcReg = MI.getOperand(1).getReg();
return X86::RFP80RegClass.contains(DstReg) ||
X86::RFP80RegClass.contains(SrcReg);
@@ -367,21 +374,21 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
setupBlockStack();
for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
- MachineInstr *MI = I;
- uint64_t Flags = MI->getDesc().TSFlags;
+ MachineInstr &MI = *I;
+ uint64_t Flags = MI.getDesc().TSFlags;
unsigned FPInstClass = Flags & X86II::FPTypeMask;
- if (MI->isInlineAsm())
+ if (MI.isInlineAsm())
FPInstClass = X86II::SpecialFP;
- if (MI->isCopy() && isFPCopy(MI))
+ if (MI.isCopy() && isFPCopy(MI))
FPInstClass = X86II::SpecialFP;
- if (MI->isImplicitDef() &&
- X86::RFP80RegClass.contains(MI->getOperand(0).getReg()))
+ if (MI.isImplicitDef() &&
+ X86::RFP80RegClass.contains(MI.getOperand(0).getReg()))
FPInstClass = X86II::SpecialFP;
- if (MI->isCall())
+ if (MI.isCall())
FPInstClass = X86II::SpecialFP;
if (FPInstClass == X86II::NotFP)
@@ -389,16 +396,16 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
MachineInstr *PrevMI = nullptr;
if (I != BB.begin())
- PrevMI = std::prev(I);
+ PrevMI = &*std::prev(I);
++NumFP; // Keep track of # of pseudo instrs
- DEBUG(dbgs() << "\nFPInst:\t" << *MI);
+ DEBUG(dbgs() << "\nFPInst:\t" << MI);
// Get dead variables list now because the MI pointer may be deleted as part
// of processing!
SmallVector<unsigned, 8> DeadRegs;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isDead())
DeadRegs.push_back(MO.getReg());
}
@@ -427,20 +434,22 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
}
// Print out all of the instructions expanded to if -debug
- DEBUG(
- MachineBasicBlock::iterator PrevI(PrevMI);
+ DEBUG({
+ MachineBasicBlock::iterator PrevI = PrevMI;
if (I == PrevI) {
dbgs() << "Just deleted pseudo instruction\n";
} else {
MachineBasicBlock::iterator Start = I;
// Rewind to first instruction newly inserted.
- while (Start != BB.begin() && std::prev(Start) != PrevI) --Start;
+ while (Start != BB.begin() && std::prev(Start) != PrevI)
+ --Start;
dbgs() << "Inserted instructions:\n\t";
Start->print(dbgs());
- while (++Start != std::next(I)) {}
+ while (++Start != std::next(I)) {
+ }
}
dumpStack();
- );
+ });
(void)PrevMI;
Changed = true;
@@ -779,8 +788,8 @@ static const TableEntry PopTable[] = {
/// instruction if it was modified in place.
///
void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
- MachineInstr* MI = I;
- DebugLoc dl = MI->getDebugLoc();
+ MachineInstr &MI = *I;
+ const DebugLoc &dl = MI.getDebugLoc();
ASSERT_SORTED(PopTable);
if (StackTop == 0)
report_fatal_error("Cannot pop empty stack!");
@@ -943,15 +952,102 @@ void FPS::handleCall(MachineBasicBlock::iterator &I) {
pushReg(N - I - 1);
}
+/// If RET has an FP register use operand, pass the first one in ST(0) and
+/// the second one in ST(1).
+void FPS::handleReturn(MachineBasicBlock::iterator &I) {
+ MachineInstr &MI = *I;
+
+ // Find the register operands.
+ unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U;
+ unsigned LiveMask = 0;
+
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &Op = MI.getOperand(i);
+ if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
+ continue;
+ // FP Register uses must be kills unless there are two uses of the same
+ // register, in which case only one will be a kill.
+ assert(Op.isUse() &&
+ (Op.isKill() || // Marked kill.
+ getFPReg(Op) == FirstFPRegOp || // Second instance.
+ MI.killsRegister(Op.getReg())) && // Later use is marked kill.
+ "Ret only defs operands, and values aren't live beyond it");
+
+ if (FirstFPRegOp == ~0U)
+ FirstFPRegOp = getFPReg(Op);
+ else {
+ assert(SecondFPRegOp == ~0U && "More than two fp operands!");
+ SecondFPRegOp = getFPReg(Op);
+ }
+ LiveMask |= (1 << getFPReg(Op));
+
+ // Remove the operand so that later passes don't see it.
+ MI.RemoveOperand(i);
+ --i;
+ --e;
+ }
+
+ // We may have been carrying spurious live-ins, so make sure only the
+ // returned registers are left live.
+ adjustLiveRegs(LiveMask, MI);
+ if (!LiveMask) return; // Quick check to see if any are possible.
+
+ // There are only four possibilities here:
+ // 1) we are returning a single FP value. In this case, it has to be in
+ // ST(0) already, so just declare success by removing the value from the
+ // FP Stack.
+ if (SecondFPRegOp == ~0U) {
+ // Assert that the top of stack contains the right FP register.
+ assert(StackTop == 1 && FirstFPRegOp == getStackEntry(0) &&
+ "Top of stack not the right register for RET!");
+
+ // Ok, everything is good, mark the value as not being on the stack
+ // anymore so that our assertion about the stack being empty at end of
+ // block doesn't fire.
+ StackTop = 0;
+ return;
+ }
+
+ // Otherwise, we are returning two values:
+ // 2) If returning the same value for both, we only have one thing in the FP
+ // stack. Consider: RET FP1, FP1
+ if (StackTop == 1) {
+ assert(FirstFPRegOp == SecondFPRegOp && FirstFPRegOp == getStackEntry(0)&&
+ "Stack misconfiguration for RET!");
+
+ // Duplicate the TOS so that we return it twice. Just pick some other FPx
+ // register to hold it.
+ unsigned NewReg = ScratchFPReg;
+ duplicateToTop(FirstFPRegOp, NewReg, MI);
+ FirstFPRegOp = NewReg;
+ }
+
+ /// Okay we know we have two different FPx operands now:
+ assert(StackTop == 2 && "Must have two values live!");
+
+ /// 3) If SecondFPRegOp is currently in ST(0) and FirstFPRegOp is currently
+ /// in ST(1). In this case, emit an fxch.
+ if (getStackEntry(0) == SecondFPRegOp) {
+ assert(getStackEntry(1) == FirstFPRegOp && "Unknown regs live");
+ moveToTop(FirstFPRegOp, MI);
+ }
+
+ /// 4) Finally, FirstFPRegOp must be in ST(0) and SecondFPRegOp must be in
+ /// ST(1). Just remove both from our understanding of the stack and return.
+ assert(getStackEntry(0) == FirstFPRegOp && "Unknown regs live");
+ assert(getStackEntry(1) == SecondFPRegOp && "Unknown regs live");
+ StackTop = 0;
+}
+
/// handleZeroArgFP - ST(0) = fld0 ST(0) = flds <mem>
///
void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
- MachineInstr *MI = I;
- unsigned DestReg = getFPReg(MI->getOperand(0));
+ MachineInstr &MI = *I;
+ unsigned DestReg = getFPReg(MI.getOperand(0));
// Change from the pseudo instruction to the concrete instruction.
- MI->RemoveOperand(0); // Remove the explicit ST(0) operand
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
+ MI.RemoveOperand(0); // Remove the explicit ST(0) operand
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
// Result gets pushed on the stack.
pushReg(DestReg);
@@ -960,14 +1056,14 @@ void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
/// handleOneArgFP - fst <mem>, ST(0)
///
void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
- MachineInstr *MI = I;
- unsigned NumOps = MI->getDesc().getNumOperands();
+ MachineInstr &MI = *I;
+ unsigned NumOps = MI.getDesc().getNumOperands();
assert((NumOps == X86::AddrNumOperands + 1 || NumOps == 1) &&
"Can only handle fst* & ftst instructions!");
// Is this the last use of the source register?
- unsigned Reg = getFPReg(MI->getOperand(NumOps-1));
- bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
+ unsigned Reg = getFPReg(MI.getOperand(NumOps - 1));
+ bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
 // FISTP64m is strange because there isn't a non-popping version.
 // If we have one _and_ we don't want to pop the operand, duplicate the value
 // on the stack instead of moving it. This ensures that popping the value is
 // always ok.
// always ok.
// Ditto FISTTP16m, FISTTP32m, FISTTP64m, ST_FpP80m.
//
- if (!KillsSrc &&
- (MI->getOpcode() == X86::IST_Fp64m32 ||
- MI->getOpcode() == X86::ISTT_Fp16m32 ||
- MI->getOpcode() == X86::ISTT_Fp32m32 ||
- MI->getOpcode() == X86::ISTT_Fp64m32 ||
- MI->getOpcode() == X86::IST_Fp64m64 ||
- MI->getOpcode() == X86::ISTT_Fp16m64 ||
- MI->getOpcode() == X86::ISTT_Fp32m64 ||
- MI->getOpcode() == X86::ISTT_Fp64m64 ||
- MI->getOpcode() == X86::IST_Fp64m80 ||
- MI->getOpcode() == X86::ISTT_Fp16m80 ||
- MI->getOpcode() == X86::ISTT_Fp32m80 ||
- MI->getOpcode() == X86::ISTT_Fp64m80 ||
- MI->getOpcode() == X86::ST_FpP80m)) {
+ if (!KillsSrc && (MI.getOpcode() == X86::IST_Fp64m32 ||
+ MI.getOpcode() == X86::ISTT_Fp16m32 ||
+ MI.getOpcode() == X86::ISTT_Fp32m32 ||
+ MI.getOpcode() == X86::ISTT_Fp64m32 ||
+ MI.getOpcode() == X86::IST_Fp64m64 ||
+ MI.getOpcode() == X86::ISTT_Fp16m64 ||
+ MI.getOpcode() == X86::ISTT_Fp32m64 ||
+ MI.getOpcode() == X86::ISTT_Fp64m64 ||
+ MI.getOpcode() == X86::IST_Fp64m80 ||
+ MI.getOpcode() == X86::ISTT_Fp16m80 ||
+ MI.getOpcode() == X86::ISTT_Fp32m80 ||
+ MI.getOpcode() == X86::ISTT_Fp64m80 ||
+ MI.getOpcode() == X86::ST_FpP80m)) {
duplicateToTop(Reg, ScratchFPReg, I);
} else {
moveToTop(Reg, I); // Move to the top of the stack...
}
// Convert from the pseudo instruction to the concrete instruction.
- MI->RemoveOperand(NumOps-1); // Remove explicit ST(0) operand
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
-
- if (MI->getOpcode() == X86::IST_FP64m ||
- MI->getOpcode() == X86::ISTT_FP16m ||
- MI->getOpcode() == X86::ISTT_FP32m ||
- MI->getOpcode() == X86::ISTT_FP64m ||
- MI->getOpcode() == X86::ST_FP80m) {
+ MI.RemoveOperand(NumOps - 1); // Remove explicit ST(0) operand
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
+
+ if (MI.getOpcode() == X86::IST_FP64m || MI.getOpcode() == X86::ISTT_FP16m ||
+ MI.getOpcode() == X86::ISTT_FP32m || MI.getOpcode() == X86::ISTT_FP64m ||
+ MI.getOpcode() == X86::ST_FP80m) {
if (StackTop == 0)
report_fatal_error("Stack empty??");
--StackTop;
@@ -1021,15 +1114,15 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
/// R1 = fadd R2, [mem]
///
void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
#ifndef NDEBUG
- unsigned NumOps = MI->getDesc().getNumOperands();
+ unsigned NumOps = MI.getDesc().getNumOperands();
assert(NumOps >= 2 && "FPRW instructions must have 2 ops!!");
#endif
// Is this the last use of the source register?
- unsigned Reg = getFPReg(MI->getOperand(1));
- bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
+ unsigned Reg = getFPReg(MI.getOperand(1));
+ bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
if (KillsSrc) {
// If this is the last use of the source register, just make sure it's on
@@ -1038,17 +1131,17 @@ void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
if (StackTop == 0)
report_fatal_error("Stack cannot be empty!");
--StackTop;
- pushReg(getFPReg(MI->getOperand(0)));
+ pushReg(getFPReg(MI.getOperand(0)));
} else {
// If this is not the last use of the source register, _copy_ it to the top
// of the stack.
- duplicateToTop(Reg, getFPReg(MI->getOperand(0)), I);
+ duplicateToTop(Reg, getFPReg(MI.getOperand(0)), I);
}
// Change from the pseudo instruction to the concrete instruction.
- MI->RemoveOperand(1); // Drop the source operand.
- MI->RemoveOperand(0); // Drop the destination operand.
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
+ MI.RemoveOperand(1); // Drop the source operand.
+ MI.RemoveOperand(0); // Drop the destination operand.
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
}
@@ -1132,16 +1225,16 @@ static const TableEntry ReverseSTiTable[] = {
void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
- unsigned NumOperands = MI->getDesc().getNumOperands();
+ unsigned NumOperands = MI.getDesc().getNumOperands();
assert(NumOperands == 3 && "Illegal TwoArgFP instruction!");
- unsigned Dest = getFPReg(MI->getOperand(0));
- unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
- unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
- bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
- bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
- DebugLoc dl = MI->getDebugLoc();
+ unsigned Dest = getFPReg(MI.getOperand(0));
+ unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
+ unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
+ bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
+ bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
+ DebugLoc dl = MI.getDebugLoc();
unsigned TOS = getStackEntry(0);
@@ -1198,14 +1291,14 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
InstTable = ReverseSTiTable;
}
- int Opcode = Lookup(InstTable, MI->getOpcode());
+ int Opcode = Lookup(InstTable, MI.getOpcode());
assert(Opcode != -1 && "Unknown TwoArgFP pseudo instruction!");
// NotTOS - The register which is not on the top of stack...
unsigned NotTOS = (TOS == Op0) ? Op1 : Op0;
// Replace the old instruction with a new instruction
- MBB->remove(I++);
+ MBB->remove(&*I++);
I = BuildMI(*MBB, I, dl, TII->get(Opcode)).addReg(getSTReg(NotTOS));
// If both operands are killed, pop one off of the stack in addition to
@@ -1221,7 +1314,7 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
assert(UpdatedSlot < StackTop && Dest < 7);
Stack[UpdatedSlot] = Dest;
RegMap[Dest] = UpdatedSlot;
- MBB->getParent()->DeleteMachineInstr(MI); // Remove the old instruction
+ MBB->getParent()->DeleteMachineInstr(&MI); // Remove the old instruction
}
/// handleCompareFP - Handle FUCOM and FUCOMI instructions, which have two FP
@@ -1230,23 +1323,23 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
- unsigned NumOperands = MI->getDesc().getNumOperands();
+ unsigned NumOperands = MI.getDesc().getNumOperands();
assert(NumOperands == 2 && "Illegal FUCOM* instruction!");
- unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
- unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
- bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
- bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
+ unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
+ unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
+ bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
+ bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
// Make sure the first operand is on the top of stack, the other one can be
// anywhere.
moveToTop(Op0, I);
// Change from the pseudo instruction to the concrete instruction.
- MI->getOperand(0).setReg(getSTReg(Op1));
- MI->RemoveOperand(1);
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
+ MI.getOperand(0).setReg(getSTReg(Op1));
+ MI.RemoveOperand(1);
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
// If any of the operands are killed by this instruction, free them.
if (KillsOp0) freeStackSlotAfter(I, Op0);
@@ -1258,21 +1351,21 @@ void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
/// instructions require that the first operand is at the top of the stack, but
/// otherwise don't modify the stack at all.
void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
- unsigned Op0 = getFPReg(MI->getOperand(0));
- unsigned Op1 = getFPReg(MI->getOperand(2));
- bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
+ unsigned Op0 = getFPReg(MI.getOperand(0));
+ unsigned Op1 = getFPReg(MI.getOperand(2));
+ bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
// The first operand *must* be on the top of the stack.
moveToTop(Op0, I);
// Change the second operand to the stack register that the operand is in.
// Change from the pseudo instruction to the concrete instruction.
- MI->RemoveOperand(0);
- MI->RemoveOperand(1);
- MI->getOperand(0).setReg(getSTReg(Op1));
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
+ MI.RemoveOperand(0);
+ MI.RemoveOperand(1);
+ MI.getOperand(0).setReg(getSTReg(Op1));
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
// If we kill the second operand, make sure to pop it from the stack.
if (Op0 != Op1 && KillsOp1) {
@@ -1287,20 +1380,25 @@ void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
/// instructions.
///
void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
- MachineInstr *MI = Inst;
+ MachineInstr &MI = *Inst;
- if (MI->isCall()) {
+ if (MI.isCall()) {
handleCall(Inst);
return;
}
- switch (MI->getOpcode()) {
+ if (MI.isReturn()) {
+ handleReturn(Inst);
+ return;
+ }
+
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unknown SpecialFP instruction!");
case TargetOpcode::COPY: {
// We handle three kinds of copies: FP <- FP, FP <- ST, and ST <- FP.
- const MachineOperand &MO1 = MI->getOperand(1);
- const MachineOperand &MO0 = MI->getOperand(0);
- bool KillsSrc = MI->killsRegister(MO1.getReg());
+ const MachineOperand &MO1 = MI.getOperand(1);
+ const MachineOperand &MO0 = MI.getOperand(0);
+ bool KillsSrc = MI.killsRegister(MO1.getReg());
// FP <- FP copy.
unsigned DstFP = getFPReg(MO0);
@@ -1322,9 +1420,9 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
case TargetOpcode::IMPLICIT_DEF: {
// All FP registers must be explicitly defined, so load a 0 instead.
- unsigned Reg = MI->getOperand(0).getReg() - X86::FP0;
+ unsigned Reg = MI.getOperand(0).getReg() - X86::FP0;
DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
- BuildMI(*MBB, Inst, MI->getDebugLoc(), TII->get(X86::LD_F0));
+ BuildMI(*MBB, Inst, MI.getDebugLoc(), TII->get(X86::LD_F0));
pushReg(Reg);
break;
}
@@ -1368,14 +1466,14 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
SmallSet<unsigned, 1> FRegIdx;
unsigned RCID;
- for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
- i != e && MI->getOperand(i).isImm(); i += 1 + NumOps) {
- unsigned Flags = MI->getOperand(i).getImm();
+ for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI.getNumOperands();
+ i != e && MI.getOperand(i).isImm(); i += 1 + NumOps) {
+ unsigned Flags = MI.getOperand(i).getImm();
NumOps = InlineAsm::getNumOperandRegisters(Flags);
if (NumOps != 1)
continue;
- const MachineOperand &MO = MI->getOperand(i + 1);
+ const MachineOperand &MO = MI.getOperand(i + 1);
if (!MO.isReg())
continue;
unsigned STReg = MO.getReg() - X86::FP0;
@@ -1408,24 +1506,24 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
}
if (STUses && !isMask_32(STUses))
- MI->emitError("fixed input regs must be last on the x87 stack");
+ MI.emitError("fixed input regs must be last on the x87 stack");
unsigned NumSTUses = countTrailingOnes(STUses);
// Defs must be contiguous from the stack top. ST0-STn.
if (STDefs && !isMask_32(STDefs)) {
- MI->emitError("output regs must be last on the x87 stack");
+ MI.emitError("output regs must be last on the x87 stack");
STDefs = NextPowerOf2(STDefs) - 1;
}
unsigned NumSTDefs = countTrailingOnes(STDefs);
// So must the clobbered stack slots. ST0-STm, m >= n.
if (STClobbers && !isMask_32(STDefs | STClobbers))
- MI->emitError("clobbers must be last on the x87 stack");
+ MI.emitError("clobbers must be last on the x87 stack");
// Popped inputs are the ones that are also clobbered or defined.
unsigned STPopped = STUses & (STDefs | STClobbers);
if (STPopped && !isMask_32(STPopped))
- MI->emitError("implicitly popped regs must be last on the x87 stack");
+ MI.emitError("implicitly popped regs must be last on the x87 stack");
unsigned NumSTPopped = countTrailingOnes(STPopped);
DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
@@ -1434,9 +1532,9 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
#ifndef NDEBUG
// If any input operand uses constraint "f", all output register
// constraints must be early-clobber defs.
- for (unsigned I = 0, E = MI->getNumOperands(); I < E; ++I)
+ for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I)
if (FRegIdx.count(I)) {
- assert((1 << getFPReg(MI->getOperand(I)) & STDefs) == 0 &&
+ assert((1 << getFPReg(MI.getOperand(I)) & STDefs) == 0 &&
"Operands with constraint \"f\" cannot overlap with defs");
}
#endif
@@ -1444,8 +1542,8 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
// Collect all FP registers (register operands with constraints "t", "u",
 // and "f") to kill after the instruction.
unsigned FPKills = ((1u << NumFPRegs) - 1) & ~0xff;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &Op = MI.getOperand(i);
if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
continue;
unsigned FPReg = getFPReg(Op);
@@ -1470,8 +1568,8 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
DEBUG({dbgs() << "Before asm: "; dumpStack();});
// With the stack layout fixed, rewrite the FP registers.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &Op = MI.getOperand(i);
if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
continue;
@@ -1508,94 +1606,6 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
// Don't delete the inline asm!
return;
}
-
- case X86::RETQ:
- case X86::RETL:
- case X86::RETIL:
- case X86::RETIQ:
- // If RET has an FP register use operand, pass the first one in ST(0) and
- // the second one in ST(1).
-
- // Find the register operands.
- unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U;
- unsigned LiveMask = 0;
-
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &Op = MI->getOperand(i);
- if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
- continue;
- // FP Register uses must be kills unless there are two uses of the same
- // register, in which case only one will be a kill.
- assert(Op.isUse() &&
- (Op.isKill() || // Marked kill.
- getFPReg(Op) == FirstFPRegOp || // Second instance.
- MI->killsRegister(Op.getReg())) && // Later use is marked kill.
- "Ret only defs operands, and values aren't live beyond it");
-
- if (FirstFPRegOp == ~0U)
- FirstFPRegOp = getFPReg(Op);
- else {
- assert(SecondFPRegOp == ~0U && "More than two fp operands!");
- SecondFPRegOp = getFPReg(Op);
- }
- LiveMask |= (1 << getFPReg(Op));
-
- // Remove the operand so that later passes don't see it.
- MI->RemoveOperand(i);
- --i, --e;
- }
-
- // We may have been carrying spurious live-ins, so make sure only the returned
- // registers are left live.
- adjustLiveRegs(LiveMask, MI);
- if (!LiveMask) return; // Quick check to see if any are possible.
-
- // There are only four possibilities here:
- // 1) we are returning a single FP value. In this case, it has to be in
- // ST(0) already, so just declare success by removing the value from the
- // FP Stack.
- if (SecondFPRegOp == ~0U) {
- // Assert that the top of stack contains the right FP register.
- assert(StackTop == 1 && FirstFPRegOp == getStackEntry(0) &&
- "Top of stack not the right register for RET!");
-
- // Ok, everything is good, mark the value as not being on the stack
- // anymore so that our assertion about the stack being empty at end of
- // block doesn't fire.
- StackTop = 0;
- return;
- }
-
- // Otherwise, we are returning two values:
- // 2) If returning the same value for both, we only have one thing in the FP
- // stack. Consider: RET FP1, FP1
- if (StackTop == 1) {
- assert(FirstFPRegOp == SecondFPRegOp && FirstFPRegOp == getStackEntry(0)&&
- "Stack misconfiguration for RET!");
-
- // Duplicate the TOS so that we return it twice. Just pick some other FPx
- // register to hold it.
- unsigned NewReg = ScratchFPReg;
- duplicateToTop(FirstFPRegOp, NewReg, MI);
- FirstFPRegOp = NewReg;
- }
-
- /// Okay we know we have two different FPx operands now:
- assert(StackTop == 2 && "Must have two values live!");
-
- /// 3) If SecondFPRegOp is currently in ST(0) and FirstFPRegOp is currently
- /// in ST(1). In this case, emit an fxch.
- if (getStackEntry(0) == SecondFPRegOp) {
- assert(getStackEntry(1) == FirstFPRegOp && "Unknown regs live");
- moveToTop(FirstFPRegOp, MI);
- }
-
- /// 4) Finally, FirstFPRegOp must be in ST(0) and SecondFPRegOp must be in
- /// ST(1). Just remove both from our understanding of the stack and return.
- assert(getStackEntry(0) == FirstFPRegOp && "Unknown regs live");
- assert(getStackEntry(1) == SecondFPRegOp && "Unknown regs live");
- StackTop = 0;
- return;
}
Inst = MBB->erase(Inst); // Remove the pseudo instruction
@@ -1614,7 +1624,7 @@ void FPS::setKillFlags(MachineBasicBlock &MBB) const {
MBB.getParent()->getSubtarget().getRegisterInfo();
LivePhysRegs LPR(TRI);
- LPR.addLiveOuts(&MBB);
+ LPR.addLiveOuts(MBB);
for (MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();
I != E; ++I) {
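
The new handleReturn (like the rest of the stackifier) leans on the pass's Stack/RegMap/StackTop bookkeeping. The sketch below restates that invariant in isolation, assuming the member names match the pass, so the ST(0)/ST(1) reasoning in the return handling is easier to follow; it is a model, not code from the file.

  // Sketch only: Stack[] maps simulated x87 stack slots to FP<n> pseudo
  // registers, RegMap[] is its inverse, and ST(i) numbering counts down from
  // the top of the simulated stack.
  struct FPStackModel {
    unsigned Stack[8];   // Stack[0] is the bottom, Stack[StackTop - 1] the top
    unsigned RegMap[8];  // RegMap[FPReg] == slot index of FPReg in Stack[]
    unsigned StackTop = 0;

    void pushReg(unsigned FPReg) {
      Stack[StackTop] = FPReg;
      RegMap[FPReg] = StackTop++;
    }
    unsigned getSTIndex(unsigned FPReg) const {
      return StackTop - 1 - RegMap[FPReg]; // 0 means ST(0)
    }
  };

In these terms, case 1 of handleReturn says the single live value must already sit at ST(0), case 2 duplicates the top so the same value can be returned twice, case 3 emits an fxch so the first value ends up at ST(0), and case 4 simply drops both values from the model.
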
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index f5ffe0cf7e880..03d925692adf8 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -159,6 +159,8 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
unsigned Opc = MBBI->getOpcode();
switch (Opc) {
default: return 0;
+ case TargetOpcode::PATCHABLE_RET:
+ case X86::RET:
case X86::RETL:
case X86::RETQ:
case X86::RETIL:
@@ -314,8 +316,8 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
}
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
- int64_t Offset, bool InEpilogue) const {
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
assert(Offset != 0 && "zero offset stack adjustment requested");
// On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
@@ -374,16 +376,33 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
unsigned Opc = PI->getOpcode();
int Offset = 0;
+ if (!doMergeWithPrevious && NI != MBB.end() &&
+ NI->getOpcode() == TargetOpcode::CFI_INSTRUCTION) {
+ // Don't merge with the next instruction if it has CFI.
+ return Offset;
+ }
+
if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
- Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
PI->getOperand(0).getReg() == StackPtr){
+ assert(PI->getOperand(1).getReg() == StackPtr);
Offset += PI->getOperand(2).getImm();
MBB.erase(PI);
if (!doMergeWithPrevious) MBBI = NI;
+ } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ PI->getOperand(0).getReg() == StackPtr &&
+ PI->getOperand(1).getReg() == StackPtr &&
+ PI->getOperand(2).getImm() == 1 &&
+ PI->getOperand(3).getReg() == X86::NoRegister &&
+ PI->getOperand(5).getReg() == X86::NoRegister) {
+ // For LEAs we have: def = lea SP, 1, noreg, Offset, noreg.
+ Offset += PI->getOperand(4).getImm();
+ MBB.erase(PI);
+ if (!doMergeWithPrevious) MBBI = NI;
} else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
PI->getOperand(0).getReg() == StackPtr) {
+ assert(PI->getOperand(1).getReg() == StackPtr);
Offset -= PI->getOperand(2).getImm();
MBB.erase(PI);
if (!doMergeWithPrevious) MBBI = NI;
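
The LEA arm added to mergeSPUpdates matches one very specific operand shape. As a reading aid, the same check factored into a standalone predicate is shown below; the helper name is invented, and it assumes the X86 opcode and register enums already used in this file.

  // Sketch only: recognize "StackPtr = lea Offset(StackPtr)" and extract the
  // offset. LEA operand layout: def, base, scale, index, disp, segment.
  static bool matchLEASPAdjust(const MachineInstr &MI, unsigned StackPtr,
                               int64_t &Offset) {
    unsigned Opc = MI.getOpcode();
    if (Opc != X86::LEA32r && Opc != X86::LEA64_32r)
      return false;
    if (MI.getOperand(0).getReg() != StackPtr ||
        MI.getOperand(1).getReg() != StackPtr ||
        MI.getOperand(2).getImm() != 1 ||
        MI.getOperand(3).getReg() != X86::NoRegister ||
        !MI.getOperand(4).isImm() ||
        MI.getOperand(5).getReg() != X86::NoRegister)
      return false;
    Offset = MI.getOperand(4).getImm();
    return true;
  }
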
@@ -393,18 +412,18 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
}
void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
- MCCFIInstruction CFIInst) const {
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL,
+ const MCCFIInstruction &CFIInst) const {
MachineFunction &MF = *MBB.getParent();
unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
}
-void
-X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- DebugLoc DL) const {
+void X86FrameLowering::emitCalleeSavedFrameMoves(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
@@ -429,7 +448,7 @@ X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineInstr *X86FrameLowering::emitStackProbe(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL,
+ const DebugLoc &DL,
bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
if (STI.isTargetWindowsCoreCLR()) {
@@ -457,6 +476,8 @@ void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
}
if (ChkStkStub != nullptr) {
+ assert(!ChkStkStub->isBundled() &&
+ "Not expecting bundled instructions here");
MachineBasicBlock::iterator MBBI = std::next(ChkStkStub->getIterator());
assert(std::prev(MBBI).operator==(ChkStkStub) &&
"MBBI expected after __chkstk_stub.");
@@ -467,8 +488,8 @@ void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
}
MachineInstr *X86FrameLowering::emitStackProbeInline(
- MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
assert(STI.is64Bit() && "different expansion needed for 32 bit");
assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
@@ -679,12 +700,12 @@ MachineInstr *X86FrameLowering::emitStackProbeInline(
// Possible TODO: physreg liveness for InProlog case.
- return ContinueMBBI;
+ return &*ContinueMBBI;
}
MachineInstr *X86FrameLowering::emitStackProbeCall(
MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
unsigned CallOp;
@@ -743,19 +764,19 @@ MachineInstr *X86FrameLowering::emitStackProbeCall(
ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
}
- return MBBI;
+ return &*MBBI;
}
MachineInstr *X86FrameLowering::emitStackProbeInlineStub(
MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
assert(InProlog && "ChkStkStub called outside prolog!");
BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("__chkstk_stub");
- return MBBI;
+ return &*MBBI;
}
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
@@ -786,7 +807,7 @@ uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) con
void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned Reg,
+ const DebugLoc &DL, unsigned Reg,
uint64_t MaxAlign) const {
uint64_t Val = -MaxAlign;
unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
@@ -950,6 +971,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
!MF.shouldSplitStack()) { // Regular stack
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
if (HasFP) MinSize += SlotSize;
+ X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
MFI->setStackSize(StackSize);
}
@@ -1009,7 +1031,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Callee-saved registers are pushed on stack before the stack is realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
+ NumBytes = alignTo(NumBytes, MaxAlign);
// Get the offset of the stack slot for the EBP register, which is
// guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
@@ -1130,7 +1152,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// virtual memory manager are allocated in correct sequence.
uint64_t AlignedNumBytes = NumBytes;
if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
- AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
+ AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
// Check whether EAX is livein for this block.
bool isEAXAlive = isEAXLiveIn(MBB);
@@ -1260,7 +1282,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
- const MachineInstr *FrameInstr = &*MBBI;
+ const MachineInstr &FrameInstr = *MBBI;
++MBBI;
if (NeedsWinCFI) {
@@ -1360,6 +1382,18 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
if (PushedRegs)
emitCalleeSavedFrameMoves(MBB, MBBI, DL);
}
+
+ // An x86 interrupt handler cannot assume anything about the direction flag
+ // (DF in the EFLAGS register). Clear the flag by emitting a "cld" instruction
+ // in the prologue of every interrupt handler.
+ //
+ // FIXME: Emit the "cld" instruction only in these cases:
+ // 1. The interrupt handler uses any of the "rep" instructions.
+ // 2. The interrupt handler calls another function.
+ //
+ if (Fn->getCallingConv() == CallingConv::X86_INTR)
+ BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
+ .setMIFlag(MachineInstr::FrameSetup);
}
bool X86FrameLowering::canUseLEAForSPInEpilogue(
@@ -1373,8 +1407,8 @@ bool X86FrameLowering::canUseLEAForSPInEpilogue(
return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}
-static bool isFuncletReturnInstr(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static bool isFuncletReturnInstr(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
case X86::CATCHRET:
case X86::CLEANUPRET:
return true;
@@ -1400,11 +1434,10 @@ static bool isFuncletReturnInstr(MachineInstr *MI) {
unsigned
X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
- // getFrameIndexReferenceFromSP has an out ref parameter for the stack
- // pointer register; pass a dummy that we ignore
unsigned SPReg;
- int Offset = getFrameIndexReferenceFromSP(MF, Info.PSPSymFrameIdx, SPReg);
- assert(Offset >= 0);
+ int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
+ /*IgnoreSPUpdates*/ true);
+ assert(Offset >= 0 && SPReg == TRI->getStackRegister());
return static_cast<unsigned>(Offset);
}
@@ -1429,18 +1462,25 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
// RBP is not included in the callee saved register block. After pushing RBP,
// everything is 16 byte aligned. Everything we allocate before an outgoing
// call must also be 16 byte aligned.
- unsigned FrameSizeMinusRBP =
- RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
+ unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
// Subtract out the size of the callee saved registers. This is how much stack
// each funclet will allocate.
return FrameSizeMinusRBP - CSSize;
}
+static bool isTailCallOpcode(unsigned Opc) {
+ return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
+ Opc == X86::TCRETURNmi ||
+ Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
+ Opc == X86::TCRETURNmi64;
+}
+
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
+ unsigned RetOpcode = MBBI->getOpcode();
DebugLoc DL;
if (MBBI != MBB.end())
DL = MBBI->getDebugLoc();
@@ -1453,7 +1493,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWinCFI =
IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
- bool IsFunclet = isFuncletReturnInstr(MBBI);
+ bool IsFunclet = isFuncletReturnInstr(*MBBI);
MachineBasicBlock *TargetMBB = nullptr;
// Get the number of bytes to allocate from the FrameInfo.
@@ -1490,7 +1530,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Callee-saved registers were pushed on stack before the stack was
// realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
+ NumBytes = alignTo(FrameSize, MaxAlign);
// Pop EBP.
BuildMI(MBB, MBBI, DL,
@@ -1589,15 +1629,17 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
if (NeedsWinCFI)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
- // Add the return addr area delta back since we are not tail calling.
- int Offset = -1 * X86FI->getTCReturnAddrDelta();
- assert(Offset >= 0 && "TCDelta should never be positive");
- if (Offset) {
- MBBI = MBB.getFirstTerminator();
+ if (!isTailCallOpcode(RetOpcode)) {
+ // Add the return addr area delta back since we are not tail calling.
+ int Offset = -1 * X86FI->getTCReturnAddrDelta();
+ assert(Offset >= 0 && "TCDelta should never be positive");
+ if (Offset) {
+ MBBI = MBB.getFirstTerminator();
- // Check for possible merge with preceding ADD instruction.
- Offset += mergeSPUpdates(MBB, MBBI, true);
- emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+ // Check for possible merge with preceding ADD instruction.
+ Offset += mergeSPUpdates(MBB, MBBI, true);
+ emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+ }
}
}
@@ -1689,58 +1731,61 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
return Offset + FPDelta;
}
-// Simplified from getFrameIndexReference keeping only StackPointer cases
-int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
- int FI,
- unsigned &FrameReg) const {
+int
+X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
+ int FI, unsigned &FrameReg,
+ bool IgnoreSPUpdates) const {
+
const MachineFrameInfo *MFI = MF.getFrameInfo();
// Does not include any dynamic realign.
const uint64_t StackSize = MFI->getStackSize();
- {
-#ifndef NDEBUG
- // LLVM arranges the stack as follows:
- // ...
- // ARG2
- // ARG1
- // RETADDR
- // PUSH RBP <-- RBP points here
- // PUSH CSRs
- // ~~~~~~~ <-- possible stack realignment (non-win64)
- // ...
- // STACK OBJECTS
- // ... <-- RSP after prologue points here
- // ~~~~~~~ <-- possible stack realignment (win64)
- //
- // if (hasVarSizedObjects()):
- // ... <-- "base pointer" (ESI/RBX) points here
- // DYNAMIC ALLOCAS
- // ... <-- RSP points here
- //
- // Case 1: In the simple case of no stack realignment and no dynamic
- // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
- // with fixed offsets from RSP.
- //
- // Case 2: In the case of stack realignment with no dynamic allocas, fixed
- // stack objects are addressed with RBP and regular stack objects with RSP.
- //
- // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
- // to address stack arguments for outgoing calls and nothing else. The "base
- // pointer" points to local variables, and RBP points to fixed objects.
- //
- // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
- // answer we give is relative to the SP after the prologue, and not the
- // SP in the middle of the function.
-
- assert((!MFI->isFixedObjectIndex(FI) || !TRI->needsStackRealignment(MF) ||
- STI.isTargetWin64()) &&
- "offset from fixed object to SP is not static");
-
- // We don't handle tail calls, and shouldn't be seeing them either.
- int TailCallReturnAddrDelta =
- MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
- assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
-#endif
- }
+ // LLVM arranges the stack as follows:
+ // ...
+ // ARG2
+ // ARG1
+ // RETADDR
+ // PUSH RBP <-- RBP points here
+ // PUSH CSRs
+ // ~~~~~~~ <-- possible stack realignment (non-win64)
+ // ...
+ // STACK OBJECTS
+ // ... <-- RSP after prologue points here
+ // ~~~~~~~ <-- possible stack realignment (win64)
+ //
+ // if (hasVarSizedObjects()):
+ // ... <-- "base pointer" (ESI/RBX) points here
+ // DYNAMIC ALLOCAS
+ // ... <-- RSP points here
+ //
+ // Case 1: In the simple case of no stack realignment and no dynamic
+ // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
+ // with fixed offsets from RSP.
+ //
+ // Case 2: In the case of stack realignment with no dynamic allocas, fixed
+ // stack objects are addressed with RBP and regular stack objects with RSP.
+ //
+ // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
+ // to address stack arguments for outgoing calls and nothing else. The "base
+ // pointer" points to local variables, and RBP points to fixed objects.
+ //
+ // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
+ // answer we give is relative to the SP after the prologue, and not the
+ // SP in the middle of the function.
+
+ if (MFI->isFixedObjectIndex(FI) && TRI->needsStackRealignment(MF) &&
+ !STI.isTargetWin64())
+ return getFrameIndexReference(MF, FI, FrameReg);
+
+  // If !hasReservedCallFrame, the function might have SP adjustment in the
+ // body. So, even though the offset is statically known, it depends on where
+ // we are in the function.
+ const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ if (!IgnoreSPUpdates && !TFI->hasReservedCallFrame(MF))
+ return getFrameIndexReference(MF, FI, FrameReg);
+
+ // We don't handle tail calls, and shouldn't be seeing them either.
+ assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
+ "we don't handle this case!");
// Fill in FrameReg output argument.
FrameReg = TRI->getStackRegister();
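
A minimal call-site sketch for the renamed hook (hypothetical: TFL, FI, DstReg
and the MOV64rm opcode are illustrative, not from this patch; addRegOffset is
the existing helper from X86InstrBuilder.h):

    unsigned FrameReg;
    int Offset = TFL->getFrameIndexReferencePreferSP(MF, FI, FrameReg,
                                                     /*IgnoreSPUpdates=*/false);
    // Load the slot through whichever register the hook picked.
    addRegOffset(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm), DstReg),
                 FrameReg, /*isKill=*/false, Offset);

With IgnoreSPUpdates=false the hook falls back to getFrameIndexReference
whenever mid-function SP adjustments could invalidate the static offset, so the
caller only gets SP-relative addressing when it is actually safe.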
@@ -1851,16 +1896,37 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
return true;
// Push GPRs. It increases frame size.
+ const MachineFunction &MF = *MBB.getParent();
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i - 1].getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(Reg);
- BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ bool isLiveIn = MRI.isLiveIn(Reg);
+ if (!isLiveIn)
+ MBB.addLiveIn(Reg);
+
+ // Decide whether we can add a kill flag to the use.
+ bool CanKill = !isLiveIn;
+  // Check if any alias of the register (sub- or super-register) is live-in.
+ if (CanKill) {
+ for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
+ if (MRI.isLiveIn(*AReg)) {
+ CanKill = false;
+ break;
+ }
+ }
+ }
+
+ // Do not set a kill flag on values that are also marked as live-in. This
+  // happens with the @llvm.returnaddress intrinsic and with arguments
+ // passed in callee saved registers.
+ // Omitting the kill flags is conservatively correct even if the live-in
+ // is not used after all.
+ BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
.setMIFlag(MachineInstr::FrameSetup);
}
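
The live-in/alias scan above can be read as a small predicate; a sketch
(hypothetical helper, mirroring the loop with IncludeSelf=true so the
isLiveIn(Reg) check is folded in):

    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/Target/TargetRegisterInfo.h"
    using namespace llvm;

    // A register may only be pushed with a kill flag if neither it nor any
    // alias (sub- or super-register) enters the function live.
    static bool canKillOnSpill(const MachineRegisterInfo &MRI,
                               const TargetRegisterInfo *TRI, unsigned Reg) {
      for (MCRegAliasIterator AReg(Reg, TRI, /*IncludeSelf=*/true);
           AReg.isValid(); ++AReg)
        if (MRI.isLiveIn(*AReg))
          return false;
      return true;
    }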
@@ -1891,7 +1957,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
if (CSI.empty())
return false;
- if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
+ if (isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
// Don't restore CSRs in 32-bit EH funclets. Matches
// spillCalleeSavedRegisters.
if (STI.is32Bit())
@@ -2250,11 +2316,33 @@ void X86FrameLowering::adjustForSegmentedStacks(
checkMBB->addSuccessor(allocMBB);
checkMBB->addSuccessor(&PrologueMBB);
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
MF.verify();
#endif
}
+/// Lookup an ERTS parameter in the !hipe.literals named metadata node.
+/// HiPE provides Erlang Runtime System-internal parameters, such as the PCB
+/// field offsets it needs, through a named metadata node "hipe.literals"
+/// containing name-value pairs.
+static unsigned getHiPELiteral(
+ NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
+ for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
+ MDNode *Node = HiPELiteralsMD->getOperand(i);
+ if (Node->getNumOperands() != 2) continue;
+ MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
+ ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
+ if (!NodeName || !NodeVal) continue;
+ ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
+ if (ValConst && NodeName->getString() == LiteralName) {
+ return ValConst->getZExtValue();
+ }
+ }
+
+ report_fatal_error("HiPE literal " + LiteralName
+ + " required but not provided");
+}
+
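
For reference, a sketch (not part of this patch) of how a frontend might emit
the metadata shape getHiPELiteral expects; 0x90 is the formerly hard-coded
AMD64 P_NSP_LIMIT value this change replaces, reused here purely as an
illustration:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Metadata.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    static void emitHiPELiteral(Module &M) {
      LLVMContext &Ctx = M.getContext();
      NamedMDNode *MD = M.getOrInsertNamedMetadata("hipe.literals");
      // One operand per literal: a (name, value) pair.
      Metadata *Ops[] = {
          MDString::get(Ctx, "P_NSP_LIMIT"),
          ValueAsMetadata::get(ConstantInt::get(Type::getInt32Ty(Ctx), 0x90))};
      MD->addOperand(MDNode::get(Ctx, Ops));
    }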
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
@@ -2280,7 +2368,14 @@ void X86FrameLowering::adjustForHiPEPrologue(
assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
// HiPE-specific values
- const unsigned HipeLeafWords = 24;
+ NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
+ ->getNamedMetadata("hipe.literals");
+ if (!HiPELiteralsMD)
+ report_fatal_error(
+ "Can't generate HiPE prologue without runtime parameters");
+ const unsigned HipeLeafWords
+ = getHiPELiteral(HiPELiteralsMD,
+ Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
const unsigned Guaranteed = HipeLeafWords * SlotSize;
unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
@@ -2300,15 +2395,13 @@ void X86FrameLowering::adjustForHiPEPrologue(
if (MFI->hasCalls()) {
unsigned MoreStackForCalls = 0;
- for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
- MBBI != MBBE; ++MBBI)
- for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
- MI != ME; ++MI) {
- if (!MI->isCall())
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (!MI.isCall())
continue;
// Get callee operand.
- const MachineOperand &MO = MI->getOperand(0);
+ const MachineOperand &MO = MI.getOperand(0);
// Only take account of global function calls (no closures etc.).
if (!MO.isGlobal())
@@ -2334,6 +2427,7 @@ void X86FrameLowering::adjustForHiPEPrologue(
MoreStackForCalls = std::max(MoreStackForCalls,
(HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
}
+ }
MaxStack += MoreStackForCalls;
}
@@ -2353,20 +2447,19 @@ void X86FrameLowering::adjustForHiPEPrologue(
unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
unsigned LEAop, CMPop, CALLop;
+ SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
if (Is64Bit) {
SPReg = X86::RSP;
PReg = X86::RBP;
LEAop = X86::LEA64r;
CMPop = X86::CMP64rm;
CALLop = X86::CALL64pcrel32;
- SPLimitOffset = 0x90;
} else {
SPReg = X86::ESP;
PReg = X86::EBP;
LEAop = X86::LEA32r;
CMPop = X86::CMP32rm;
CALLop = X86::CALLpcrel32;
- SPLimitOffset = 0x4c;
}
ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
@@ -2395,13 +2488,15 @@ void X86FrameLowering::adjustForHiPEPrologue(
incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
incStackMBB->addSuccessor(incStackMBB, {1, 100});
}
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
MF.verify();
#endif
}
bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, int Offset) const {
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL,
+ int Offset) const {
if (Offset <= 0)
return false;
@@ -2440,7 +2535,8 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
bool IsDef = false;
for (const MachineOperand &MO : Prev->implicit_operands()) {
- if (MO.isReg() && MO.isDef() && MO.getReg() == Candidate) {
+ if (MO.isReg() && MO.isDef() &&
+ TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
IsDef = true;
break;
}
@@ -2468,7 +2564,7 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
return true;
}
-void X86FrameLowering::
+MachineBasicBlock::iterator X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
bool reserveCallFrame = hasReservedCallFrame(MF);
@@ -2488,7 +2584,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
unsigned StackAlign = getStackAlignment();
- Amount = RoundUpToAlignment(Amount, StackAlign);
+ Amount = alignTo(Amount, StackAlign);
MachineModuleInfo &MMI = MF.getMMI();
const Function *Fn = MF.getFunction();
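
alignTo is the renamed RoundUpToAlignment from llvm/Support/MathExtras.h with
the same round-up semantics; a quick standalone check:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void checkAlignTo() {
      assert(llvm::alignTo(13, 8) == 16 && "rounds up to the next multiple");
      assert(llvm::alignTo(16, 8) == 16 && "aligned values are unchanged");
    }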
@@ -2512,7 +2608,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
if (Amount == 0)
- return;
+ return I;
// Factor out the amount that gets handled inside the sequence
// (Pushes of argument for frame setup, callee pops for frame destroy)
@@ -2525,13 +2621,23 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
BuildCFI(MBB, I, DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
- if (Amount) {
- // Add Amount to SP to destroy a frame, and subtract to setup.
- int Offset = isDestroy ? Amount : -Amount;
-
- if (!(Fn->optForMinSize() &&
- adjustStackWithPops(MBB, I, DL, Offset)))
- BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
+ // Add Amount to SP to destroy a frame, or subtract to setup.
+ int64_t StackAdjustment = isDestroy ? Amount : -Amount;
+ int64_t CfaAdjustment = -StackAdjustment;
+
+ if (StackAdjustment) {
+ // Merge with any previous or following adjustment instruction. Note: the
+ // instructions merged with here do not have CFI, so their stack
+ // adjustments do not feed into CfaAdjustment.
+ StackAdjustment += mergeSPUpdates(MBB, I, true);
+ StackAdjustment += mergeSPUpdates(MBB, I, false);
+
+ if (StackAdjustment) {
+ if (!(Fn->optForMinSize() &&
+ adjustStackWithPops(MBB, I, DL, StackAdjustment)))
+ BuildStackAdjustment(MBB, I, DL, StackAdjustment,
+ /*InEpilogue=*/false);
+ }
}
if (DwarfCFI && !hasFP(MF)) {
@@ -2541,18 +2647,16 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// CFI only for EH purposes or for debugging. EH only requires the CFA
// offset to be correct at each call site, while for debugging we want
// it to be more precise.
- int CFAOffset = Amount;
+
// TODO: When not using precise CFA, we also need to adjust for the
// InternalAmt here.
-
- if (CFAOffset) {
- CFAOffset = isDestroy ? -CFAOffset : CFAOffset;
- BuildCFI(MBB, I, DL,
- MCCFIInstruction::createAdjustCfaOffset(nullptr, CFAOffset));
+ if (CfaAdjustment) {
+ BuildCFI(MBB, I, DL, MCCFIInstruction::createAdjustCfaOffset(
+ nullptr, CfaAdjustment));
}
}
- return;
+ return I;
}
if (isDestroy && InternalAmt) {
@@ -2562,11 +2666,20 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// We are not tracking the stack pointer adjustment by the callee, so make
// sure we restore the stack pointer immediately after the call, there may
// be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
+ MachineBasicBlock::iterator CI = I;
MachineBasicBlock::iterator B = MBB.begin();
- while (I != B && !std::prev(I)->isCall())
- --I;
- BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
+ while (CI != B && !std::prev(CI)->isCall())
+ --CI;
+ BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
}
+
+ return I;
+}
+
+bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
+ assert(MBB.getParent() && "Block is not attached to a function!");
+ const MachineFunction &MF = *MBB.getParent();
+ return !TRI->needsStackRealignment(MF) || !MBB.isLiveIn(X86::EFLAGS);
}
bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
@@ -2604,7 +2717,7 @@ bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- DebugLoc DL, bool RestoreSP) const {
+ const DebugLoc &DL, bool RestoreSP) const {
assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
assert(STI.is32Bit() && !Uses64BitFramePtr &&
@@ -2664,6 +2777,150 @@ MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
return MBBI;
}
+namespace {
+// Struct used by orderFrameObjects to help sort the stack objects.
+struct X86FrameSortingObject {
+ bool IsValid = false; // true if we care about this Object.
+ unsigned ObjectIndex = 0; // Index of Object into MFI list.
+ unsigned ObjectSize = 0; // Size of Object in bytes.
+ unsigned ObjectAlignment = 1; // Alignment of Object in bytes.
+ unsigned ObjectNumUses = 0; // Object static number of uses.
+};
+
+// The comparison function we use for std::sort to order our local
+// stack symbols. The current algorithm uses an estimated "density",
+// weighing each object's static number of uses against its size, to
+// roughly minimize code size. For example, an object of size 16B that
+// is referenced 5 times (density 5/16) gets higher priority than four
+// 4B objects referenced once each (density 1/4).
+// It's not perfect, and we may be able to squeeze a few more bytes out
+// of it (for example, 0(esp) requires fewer bytes, and symbols
+// allocated at the fringe can be given special consideration since
+// their size matters less), but the algorithmic complexity grows too
+// much to be worth the extra gains. This gets us pretty close.
+// The final order leaves the objects with the highest priority at the
+// end of the list.
+struct X86FrameSortingComparator {
+ inline bool operator()(const X86FrameSortingObject &A,
+ const X86FrameSortingObject &B) {
+ uint64_t DensityAScaled, DensityBScaled;
+
+ // For consistency in our comparison, all invalid objects are placed
+ // at the end. This also allows us to stop walking when we hit the
+ // first invalid item after it's all sorted.
+ if (!A.IsValid)
+ return false;
+ if (!B.IsValid)
+ return true;
+
+    // The density is calculated as:
+    //   (double)DensityA = A.ObjectNumUses / A.ObjectSize
+    //   (double)DensityB = B.ObjectNumUses / B.ObjectSize
+    // Since floating point <, >, == comparisons can be inconsistent
+    // depending on the floating point model the compiler was built
+    // with, we scale both sides by A.ObjectSize * B.ObjectSize. This
+    // factors away the division and, with it, the need for any
+    // floating point arithmetic.
+ DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
+ static_cast<uint64_t>(B.ObjectSize);
+ DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
+ static_cast<uint64_t>(A.ObjectSize);
+
+    // If the two densities are equal, prioritize highest alignment
+    // objects. This allows objects of similar alignment to be packed
+    // together (given the same density).
+    // There's room for improvement here, too: objects of similar
+    // alignment but different density could be packed next to each
+    // other to save padding. That would require further
+    // complexity/iterations, though, and in general the gain isn't
+    // worth it. Something to keep in mind.
+ if (DensityAScaled == DensityBScaled)
+ return A.ObjectAlignment < B.ObjectAlignment;
+
+ return DensityAScaled < DensityBScaled;
+ }
+};
+} // namespace
+
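
To make the cross-multiplication concrete, a standalone worked example (the
numbers echo the 16B/5-uses vs. 4B/1-use case from the comment above):

    #include <cassert>
    #include <cstdint>

    int main() {
      // A = {NumUses = 5, Size = 16} -> density 5/16 = 0.3125
      // B = {NumUses = 1, Size = 4}  -> density 1/4  = 0.25
      uint64_t DensityAScaled = uint64_t(5) * 4;  // A.NumUses * B.Size == 20
      uint64_t DensityBScaled = uint64_t(1) * 16; // B.NumUses * A.Size == 16
      // operator()(A, B) computes 20 < 16, i.e. false: the denser object A
      // sorts after B, toward the end of the list (highest priority).
      assert(!(DensityAScaled < DensityBScaled));
      return 0;
    }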
+// Order the symbols in the local stack.
+// We want to place the local stack objects in some sort of sensible order.
+// The heuristic we use is to try and pack them according to static number
+// of uses and size of object in order to minimize code size.
+void X86FrameLowering::orderFrameObjects(
+ const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Don't waste time if there's nothing to do.
+ if (ObjectsToAllocate.empty())
+ return;
+
+  // Create an array of all MFI objects. We won't need all of these
+  // objects, but we build the full array so that, when counting "uses"
+  // below, an object can be reached by cheap direct indexing instead
+  // of a search.
+ std::vector<X86FrameSortingObject> SortingObjects(MFI->getObjectIndexEnd());
+
+ // Walk the objects we care about and mark them as such in our working
+ // struct.
+ for (auto &Obj : ObjectsToAllocate) {
+ SortingObjects[Obj].IsValid = true;
+ SortingObjects[Obj].ObjectIndex = Obj;
+ SortingObjects[Obj].ObjectAlignment = MFI->getObjectAlignment(Obj);
+ // Set the size.
+ int ObjectSize = MFI->getObjectSize(Obj);
+ if (ObjectSize == 0)
+ // Variable size. Just use 4.
+ SortingObjects[Obj].ObjectSize = 4;
+ else
+ SortingObjects[Obj].ObjectSize = ObjectSize;
+ }
+
+ // Count the number of uses for each object.
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (MI.isDebugValue())
+ continue;
+ for (const MachineOperand &MO : MI.operands()) {
+ // Check to see if it's a local stack symbol.
+ if (!MO.isFI())
+ continue;
+ int Index = MO.getIndex();
+ // Check to see if it falls within our range, and is tagged
+ // to require ordering.
+ if (Index >= 0 && Index < MFI->getObjectIndexEnd() &&
+ SortingObjects[Index].IsValid)
+ SortingObjects[Index].ObjectNumUses++;
+ }
+ }
+ }
+
+  // Sort the objects using X86FrameSortingComparator (see its comment
+  // for info).
+ std::stable_sort(SortingObjects.begin(), SortingObjects.end(),
+ X86FrameSortingComparator());
+
+  // Now modify the original list to represent the final order that
+  // we want. The order depends on whether we access the objects from
+  // the stack pointer or the frame pointer. For SP, objects we want
+  // at smaller offsets should end up at the END of the list; for FP,
+  // the order should be flipped.
+ int i = 0;
+ for (auto &Obj : SortingObjects) {
+ // All invalid items are sorted at the end, so it's safe to stop.
+ if (!Obj.IsValid)
+ break;
+ ObjectsToAllocate[i++] = Obj.ObjectIndex;
+ }
+
+ // Flip it if we're accessing off of the FP.
+ if (!TRI->needsStackRealignment(MF) && hasFP(MF))
+ std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
+}
+
unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
// RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
unsigned Offset = 16;
@@ -2691,14 +2948,30 @@ void X86FrameLowering::processFunctionBeforeFrameFinalized(
// were no fixed objects, use offset -SlotSize, which is immediately after the
// return address. Fixed objects have negative frame indices.
MachineFrameInfo *MFI = MF.getFrameInfo();
+ WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
int64_t MinFixedObjOffset = -SlotSize;
for (int I = MFI->getObjectIndexBegin(); I < 0; ++I)
MinFixedObjOffset = std::min(MinFixedObjOffset, MFI->getObjectOffset(I));
+ for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
+ for (WinEHHandlerType &H : TBME.HandlerArray) {
+ int FrameIndex = H.CatchObj.FrameIndex;
+ if (FrameIndex != INT_MAX) {
+ // Ensure alignment.
+ unsigned Align = MFI->getObjectAlignment(FrameIndex);
+ MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
+ MinFixedObjOffset -= MFI->getObjectSize(FrameIndex);
+ MFI->setObjectOffset(FrameIndex, MinFixedObjOffset);
+ }
+ }
+ }
+
+ // Ensure alignment.
+ MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
int UnwindHelpFI =
MFI->CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
- MF.getWinEHFuncInfo()->UnwindHelpFrameIdx = UnwindHelpFI;
+ EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
// Store -2 into UnwindHelp on function entry. We have to scan forwards past
// other frame setup instructions.
diff --git a/lib/Target/X86/X86FrameLowering.h b/lib/Target/X86/X86FrameLowering.h
index 3ab41b4a57890..4a01014ee545d 100644
--- a/lib/Target/X86/X86FrameLowering.h
+++ b/lib/Target/X86/X86FrameLowering.h
@@ -52,8 +52,8 @@ public:
/// the number of bytes to probe in RAX/EAX. Returns instruction just
/// after the expansion.
MachineInstr *emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
- bool InProlog) const;
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, bool InProlog) const;
/// Replace a StackProbe inline-stub with the actual probe code inline.
void inlineStackProbe(MachineFunction &MF,
@@ -61,7 +61,7 @@ public:
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL) const;
+ const DebugLoc &DL) const;
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
@@ -100,12 +100,13 @@ public:
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const override;
- int getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const override;
+ int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
+ unsigned &FrameReg,
+ bool IgnoreSPUpdates) const override;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const override;
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override;
@@ -127,6 +128,16 @@ public:
/// Check that LEA can be used on SP in an epilogue sequence for \p MF.
bool canUseLEAForSPInEpilogue(const MachineFunction &MF) const;
+ /// Check whether or not the given \p MBB can be used as a prologue
+ /// for the target.
+ /// The prologue will be inserted first in this basic block.
+ /// This method is used by the shrink-wrapping pass to decide if
+ /// \p MBB will be correctly handled by the target.
+  /// If the target enables shrink-wrapping without overriding this
+  /// method, each basic block is assumed to be a valid prologue.
+ bool canUseAsPrologue(const MachineBasicBlock &MBB) const override;
+
/// Check whether or not the given \p MBB can be used as an epilogue
/// for the target.
/// The epilogue will be inserted before the first terminator of that block.
@@ -137,6 +148,13 @@ public:
/// Returns true if the target will correctly handle shrink wrapping.
bool enableShrinkWrapping(const MachineFunction &MF) const override;
+ /// Order the symbols in the local stack.
+ /// We want to place the local stack objects in some sort of sensible order.
+ /// The heuristic we use is to try and pack them according to static number
+ /// of uses and size in order to minimize code size.
+ void orderFrameObjects(const MachineFunction &MF,
+ SmallVectorImpl<int> &ObjectsToAllocate) const override;
+
/// convertArgMovsToPushes - This method tries to convert a call sequence
/// that uses sub and mov instructions to put the argument onto the stack
/// into a series of pushes.
@@ -148,14 +166,14 @@ public:
/// Wraps up getting a CFI index and building a MachineInstr for it.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- DebugLoc DL, MCCFIInstruction CFIInst) const;
+ const DebugLoc &DL, const MCCFIInstruction &CFIInst) const;
/// Sets up EBP and optionally ESI based on the incoming EBP value. Only
/// needed for 32-bit. Used in funclet prologues and at catchret destinations.
MachineBasicBlock::iterator
restoreWin32EHStackPointers(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
- bool RestoreSP = false) const;
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, bool RestoreSP = false) const;
private:
uint64_t calculateMaxStackAlign(const MachineFunction &MF) const;
@@ -163,34 +181,35 @@ private:
/// Emit target stack probe as a call to a helper function
MachineInstr *emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, bool InProlog) const;
+ const DebugLoc &DL, bool InProlog) const;
/// Emit target stack probe as an inline sequence.
MachineInstr *emitStackProbeInline(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, bool InProlog) const;
+ const DebugLoc &DL, bool InProlog) const;
/// Emit a stub to later inline the target stack probe.
MachineInstr *emitStackProbeInlineStub(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, bool InProlog) const;
+ const DebugLoc &DL,
+ bool InProlog) const;
/// Aligns the stack pointer by ANDing it with -MaxAlign.
void BuildStackAlignAND(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
unsigned Reg, uint64_t MaxAlign) const;
/// Make small positive stack adjustments using POPs.
bool adjustStackWithPops(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
int Offset) const;
/// Adjusts the stack pointer using LEA, SUB, or ADD.
MachineInstrBuilder BuildStackAdjustment(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, int64_t Offset,
+ const DebugLoc &DL, int64_t Offset,
bool InEpilogue) const;
unsigned getPSPSlotOffsetFromSP(const MachineFunction &MF) const;
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 868ae4e19e55b..7d53b3db6175d 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -157,9 +157,13 @@ namespace {
/// performance.
bool OptForSize;
+ /// If true, selector should try to optimize for minimum code size.
+ bool OptForMinSize;
+
public:
explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(tm, OptLevel), OptForSize(false) {}
+ : SelectionDAGISel(tm, OptLevel), OptForSize(false),
+ OptForMinSize(false) {}
const char *getPassName() const override {
return "X86 DAG->DAG Instruction Selection";
@@ -192,9 +196,8 @@ namespace {
#include "X86GenDAGISel.inc"
private:
- SDNode *Select(SDNode *N) override;
- SDNode *selectGather(SDNode *N, unsigned Opc);
- SDNode *selectAtomicLoadArith(SDNode *Node, MVT NVT);
+ void Select(SDNode *N) override;
+ bool tryGather(SDNode *N, unsigned Opc);
bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
@@ -238,7 +241,7 @@ namespace {
void emitSpecialCodeForMain();
- inline void getAddressOperands(X86ISelAddressMode &AM, SDLoc DL,
+ inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp,
SDValue &Segment) {
@@ -323,7 +326,7 @@ namespace {
// types.
if (User->getNumOperands() != 2)
continue;
-
+
// Immediates that are used for offsets as part of stack
// manipulation should be left alone. These are typically
// used to indicate SP offsets for argument passing and
@@ -357,12 +360,12 @@ namespace {
}
/// Return a target constant with the specified value of type i8.
- inline SDValue getI8Imm(unsigned Imm, SDLoc DL) {
+ inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
}
/// Return a target constant with the specified value, of type i32.
- inline SDValue getI32Imm(unsigned Imm, SDLoc DL) {
+ inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
}
@@ -531,8 +534,10 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
}
void X86DAGToDAGISel::PreprocessISelDAG() {
- // OptForSize is used in pattern predicates that isel is matching.
+ // OptFor[Min]Size are used in pattern predicates that isel is matching.
OptForSize = MF->getFunction()->optForSize();
+ OptForMinSize = MF->getFunction()->optForMinSize();
+ assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
E = CurDAG->allnodes_end(); I != E; ) {
@@ -545,7 +550,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
(N->getOpcode() == X86ISD::TC_RETURN &&
// Only does this if load can be folded into TC_RETURN.
(Subtarget->is64Bit() ||
- getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
+ !getTargetMachine().isPositionIndependent())))) {
/// Also try moving call address load from outside callseq_start to just
/// before the call to allow it to be folded.
///
@@ -624,13 +629,11 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
SDLoc dl(N);
// FIXME: optimize the case where the src/dest is a load or store?
- SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
- N->getOperand(0),
- MemTmp, MachinePointerInfo(), MemVT,
- false, false, 0);
+ SDValue Store =
+ CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, N->getOperand(0),
+ MemTmp, MachinePointerInfo(), MemVT);
SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
- MachinePointerInfo(),
- MemVT, false, false, false, 0);
+ MachinePointerInfo(), MemVT);
// We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
// extload we created. This will cause general havok on the dag because
@@ -657,7 +660,7 @@ void X86DAGToDAGISel::emitSpecialCodeForMain() {
CLI.setChain(CurDAG->getRoot())
.setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
- std::move(Args), 0);
+ std::move(Args));
const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
CurDAG->setRoot(Result.second);
@@ -714,7 +717,7 @@ bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
// For more information see http://people.redhat.com/drepper/tls.pdf
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
- Subtarget->isTargetLinux())
+ Subtarget->isTargetGlibc())
switch (N->getPointerInfo().getAddrSpace()) {
case 256:
AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
@@ -722,6 +725,8 @@ bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
case 257:
AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
return false;
+ // Address space 258 is not handled here, because it is not used to
+ // address TLS areas.
}
return true;
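
For context, a sketch of how these address spaces surface at the source level
through clang's address_space extension (illustrative, not part of this patch):

    // Loads through address space 256 lower to GS-relative accesses and
    // 257 to FS-relative ones; on x86-64 Linux/glibc, FS holds the TLS base.
    typedef __attribute__((address_space(257))) int fs_int;

    int load_tls_word(fs_int *p) {
      return *p; // e.g. movl %fs:(%rdi), %eax on x86-64
    }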
@@ -1419,11 +1424,13 @@ bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
return false;
X86ISelAddressMode AM;
unsigned AddrSpace = Mgs->getPointerInfo().getAddrSpace();
- // AddrSpace 256 -> GS, 257 -> FS.
+ // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
if (AddrSpace == 256)
AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
if (AddrSpace == 257)
AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
+ if (AddrSpace == 258)
+ AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
SDLoc DL(N);
Base = Mgs->getBasePtr();
@@ -1468,11 +1475,13 @@ bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
unsigned AddrSpace =
cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
- // AddrSpace 256 -> GS, 257 -> FS.
+ // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
if (AddrSpace == 256)
AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
if (AddrSpace == 257)
AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
+ if (AddrSpace == 258)
+ AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
}
if (matchAddress(N, AM))
@@ -1569,10 +1578,12 @@ bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &Segment) {
+ // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
+ SDLoc DL(N);
+
if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
return false;
- SDLoc DL(N);
RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
if (RN && RN->getReg() == 0)
Base = CurDAG->getRegister(0, MVT::i64);
@@ -1612,6 +1623,10 @@ bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
SDValue &Segment) {
X86ISelAddressMode AM;
+  // Save the DL and VT before calling matchAddress; it can invalidate N.
+ SDLoc DL(N);
+ MVT VT = N.getSimpleValueType();
+
// Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
// segments.
SDValue Copy = AM.Segment;
@@ -1622,7 +1637,6 @@ bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
assert (T == AM.Segment);
AM.Segment = Copy;
- MVT VT = N.getSimpleValueType();
unsigned Complexity = 0;
if (AM.BaseType == X86ISelAddressMode::RegBase)
if (AM.Base_Reg.getNode())
@@ -1662,7 +1676,7 @@ bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
if (Complexity <= 2)
return false;
- getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
+ getAddressOperands(AM, DL, Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1713,295 +1727,6 @@ SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
}
-/// Atomic opcode table
-///
-enum AtomicOpc {
- ADD,
- SUB,
- INC,
- DEC,
- OR,
- AND,
- XOR,
- AtomicOpcEnd
-};
-
-enum AtomicSz {
- ConstantI8,
- I8,
- SextConstantI16,
- ConstantI16,
- I16,
- SextConstantI32,
- ConstantI32,
- I32,
- SextConstantI64,
- ConstantI64,
- I64,
- AtomicSzEnd
-};
-
-static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
- {
- X86::LOCK_ADD8mi,
- X86::LOCK_ADD8mr,
- X86::LOCK_ADD16mi8,
- X86::LOCK_ADD16mi,
- X86::LOCK_ADD16mr,
- X86::LOCK_ADD32mi8,
- X86::LOCK_ADD32mi,
- X86::LOCK_ADD32mr,
- X86::LOCK_ADD64mi8,
- X86::LOCK_ADD64mi32,
- X86::LOCK_ADD64mr,
- },
- {
- X86::LOCK_SUB8mi,
- X86::LOCK_SUB8mr,
- X86::LOCK_SUB16mi8,
- X86::LOCK_SUB16mi,
- X86::LOCK_SUB16mr,
- X86::LOCK_SUB32mi8,
- X86::LOCK_SUB32mi,
- X86::LOCK_SUB32mr,
- X86::LOCK_SUB64mi8,
- X86::LOCK_SUB64mi32,
- X86::LOCK_SUB64mr,
- },
- {
- 0,
- X86::LOCK_INC8m,
- 0,
- 0,
- X86::LOCK_INC16m,
- 0,
- 0,
- X86::LOCK_INC32m,
- 0,
- 0,
- X86::LOCK_INC64m,
- },
- {
- 0,
- X86::LOCK_DEC8m,
- 0,
- 0,
- X86::LOCK_DEC16m,
- 0,
- 0,
- X86::LOCK_DEC32m,
- 0,
- 0,
- X86::LOCK_DEC64m,
- },
- {
- X86::LOCK_OR8mi,
- X86::LOCK_OR8mr,
- X86::LOCK_OR16mi8,
- X86::LOCK_OR16mi,
- X86::LOCK_OR16mr,
- X86::LOCK_OR32mi8,
- X86::LOCK_OR32mi,
- X86::LOCK_OR32mr,
- X86::LOCK_OR64mi8,
- X86::LOCK_OR64mi32,
- X86::LOCK_OR64mr,
- },
- {
- X86::LOCK_AND8mi,
- X86::LOCK_AND8mr,
- X86::LOCK_AND16mi8,
- X86::LOCK_AND16mi,
- X86::LOCK_AND16mr,
- X86::LOCK_AND32mi8,
- X86::LOCK_AND32mi,
- X86::LOCK_AND32mr,
- X86::LOCK_AND64mi8,
- X86::LOCK_AND64mi32,
- X86::LOCK_AND64mr,
- },
- {
- X86::LOCK_XOR8mi,
- X86::LOCK_XOR8mr,
- X86::LOCK_XOR16mi8,
- X86::LOCK_XOR16mi,
- X86::LOCK_XOR16mr,
- X86::LOCK_XOR32mi8,
- X86::LOCK_XOR32mi,
- X86::LOCK_XOR32mr,
- X86::LOCK_XOR64mi8,
- X86::LOCK_XOR64mi32,
- X86::LOCK_XOR64mr,
- }
-};
-
-// Return the target constant operand for atomic-load-op and do simple
-// translations, such as from atomic-load-add to lock-sub. The return value is
-// one of the following 3 cases:
-// + target-constant, the operand could be supported as a target constant.
-// + empty, the operand is not needed any more with the new op selected.
-// + non-empty, otherwise.
-static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
- SDLoc dl,
- enum AtomicOpc &Op, MVT NVT,
- SDValue Val,
- const X86Subtarget *Subtarget) {
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
- int64_t CNVal = CN->getSExtValue();
- // Quit if not 32-bit imm.
- if ((int32_t)CNVal != CNVal)
- return Val;
- // Quit if INT32_MIN: it would be negated as it is negative and overflow,
- // producing an immediate that does not fit in the 32 bits available for
- // an immediate operand to sub. However, it still fits in 32 bits for the
- // add (since it is not negated) so we can return target-constant.
- if (CNVal == INT32_MIN)
- return CurDAG->getTargetConstant(CNVal, dl, NVT);
- // For atomic-load-add, we could do some optimizations.
- if (Op == ADD) {
- // Translate to INC/DEC if ADD by 1 or -1.
- if (((CNVal == 1) || (CNVal == -1)) && !Subtarget->slowIncDec()) {
- Op = (CNVal == 1) ? INC : DEC;
- // No more constant operand after being translated into INC/DEC.
- return SDValue();
- }
- // Translate to SUB if ADD by negative value.
- if (CNVal < 0) {
- Op = SUB;
- CNVal = -CNVal;
- }
- }
- return CurDAG->getTargetConstant(CNVal, dl, NVT);
- }
-
- // If the value operand is single-used, try to optimize it.
- if (Op == ADD && Val.hasOneUse()) {
- // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
- if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
- Op = SUB;
- return Val.getOperand(1);
- }
- // A special case for i16, which needs truncating as, in most cases, it's
- // promoted to i32. We will translate
- // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x))
- if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
- Val.getOperand(0).getOpcode() == ISD::SUB &&
- X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
- Op = SUB;
- Val = Val.getOperand(0);
- return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
- Val.getOperand(1));
- }
- }
-
- return Val;
-}
-
-SDNode *X86DAGToDAGISel::selectAtomicLoadArith(SDNode *Node, MVT NVT) {
- if (Node->hasAnyUseOfValue(0))
- return nullptr;
-
- SDLoc dl(Node);
-
- // Optimize common patterns for __sync_or_and_fetch and similar arith
- // operations where the result is not used. This allows us to use the "lock"
- // version of the arithmetic instruction.
- SDValue Chain = Node->getOperand(0);
- SDValue Ptr = Node->getOperand(1);
- SDValue Val = Node->getOperand(2);
- SDValue Base, Scale, Index, Disp, Segment;
- if (!selectAddr(Node, Ptr, Base, Scale, Index, Disp, Segment))
- return nullptr;
-
- // Which index into the table.
- enum AtomicOpc Op;
- switch (Node->getOpcode()) {
- default:
- return nullptr;
- case ISD::ATOMIC_LOAD_OR:
- Op = OR;
- break;
- case ISD::ATOMIC_LOAD_AND:
- Op = AND;
- break;
- case ISD::ATOMIC_LOAD_XOR:
- Op = XOR;
- break;
- case ISD::ATOMIC_LOAD_ADD:
- Op = ADD;
- break;
- }
-
- Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val, Subtarget);
- bool isUnOp = !Val.getNode();
- bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);
-
- unsigned Opc = 0;
- switch (NVT.SimpleTy) {
- default: return nullptr;
- case MVT::i8:
- if (isCN)
- Opc = AtomicOpcTbl[Op][ConstantI8];
- else
- Opc = AtomicOpcTbl[Op][I8];
- break;
- case MVT::i16:
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = AtomicOpcTbl[Op][SextConstantI16];
- else
- Opc = AtomicOpcTbl[Op][ConstantI16];
- } else
- Opc = AtomicOpcTbl[Op][I16];
- break;
- case MVT::i32:
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = AtomicOpcTbl[Op][SextConstantI32];
- else
- Opc = AtomicOpcTbl[Op][ConstantI32];
- } else
- Opc = AtomicOpcTbl[Op][I32];
- break;
- case MVT::i64:
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = AtomicOpcTbl[Op][SextConstantI64];
- else if (i64immSExt32(Val.getNode()))
- Opc = AtomicOpcTbl[Op][ConstantI64];
- else
- llvm_unreachable("True 64 bits constant in SelectAtomicLoadArith");
- } else
- Opc = AtomicOpcTbl[Op][I64];
- break;
- }
-
- assert(Opc != 0 && "Invalid arith lock transform!");
-
- // Building the new node.
- SDValue Ret;
- if (isUnOp) {
- SDValue Ops[] = { Base, Scale, Index, Disp, Segment, Chain };
- Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
- } else {
- SDValue Ops[] = { Base, Scale, Index, Disp, Segment, Val, Chain };
- Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
- }
-
- // Copying the MachineMemOperand.
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
- cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
-
- // We need to have two outputs as that is what the original instruction had.
- // So we add a dummy, undefined output. This is safe as we checked first
- // that no-one uses our output anyway.
- SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, NVT), 0);
- SDValue RetVals[] = { Undef, Ret };
- return CurDAG->getMergeValues(RetVals, dl).getNode();
-}
-
/// Test whether the given X86ISD::CMP node has any uses which require the SF
/// or OF bits to be accurate.
static bool hasNoSignedComparisonUses(SDNode *N) {
@@ -2168,7 +1893,7 @@ static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
}
/// Customized ISel for GATHER operations.
-SDNode *X86DAGToDAGISel::selectGather(SDNode *Node, unsigned Opc) {
+bool X86DAGToDAGISel::tryGather(SDNode *Node, unsigned Opc) {
// Operands of Gather: VSrc, Base, VIdx, VMask, Scale
SDValue Chain = Node->getOperand(0);
SDValue VSrc = Node->getOperand(2);
@@ -2177,7 +1902,7 @@ SDNode *X86DAGToDAGISel::selectGather(SDNode *Node, unsigned Opc) {
SDValue VMask = Node->getOperand(5);
ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
if (!Scale)
- return nullptr;
+ return false;
SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
MVT::Other);
@@ -2196,10 +1921,11 @@ SDNode *X86DAGToDAGISel::selectGather(SDNode *Node, unsigned Opc) {
// of ResNode.
ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
- return ResNode;
+ CurDAG->RemoveDeadNode(Node);
+ return true;
}
-SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
+void X86DAGToDAGISel::Select(SDNode *Node) {
MVT NVT = Node->getSimpleValueType(0);
unsigned Opc, MOpc;
unsigned Opcode = Node->getOpcode();
@@ -2210,7 +1936,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
Node->setNodeId(-1);
- return nullptr; // Already selected.
+ return; // Already selected.
}
switch (Opcode) {
@@ -2229,10 +1955,10 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
Node->getOperand(0), ZextTarget);
- ReplaceUses(SDValue(Node, 0), Brind);
+ ReplaceNode(Node, Brind.getNode());
SelectCode(ZextTarget.getNode());
SelectCode(Brind.getNode());
- return nullptr;
+ return;
}
break;
}
@@ -2278,17 +2004,16 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::x86_avx2_gather_q_d: Opc = X86::VPGATHERQDrm; break;
case Intrinsic::x86_avx2_gather_q_d_256: Opc = X86::VPGATHERQDYrm; break;
}
- SDNode *RetVal = selectGather(Node, Opc);
- if (RetVal)
- // We already called ReplaceUses inside SelectGather.
- return nullptr;
+ if (tryGather(Node, Opc))
+ return;
break;
}
}
break;
}
case X86ISD::GlobalBaseReg:
- return getGlobalBaseReg();
+ ReplaceNode(Node, getGlobalBaseReg());
+ return;
case X86ISD::SHRUNKBLEND: {
// SHRUNKBLEND selects like a regular VSELECT.
@@ -2298,18 +2023,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(Node, 0), VSelect);
SelectCode(VSelect.getNode());
// We already called ReplaceUses.
- return nullptr;
+ return;
}
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_ADD: {
- SDNode *RetVal = selectAtomicLoadArith(Node, NVT);
- if (RetVal)
- return RetVal;
- break;
- }
case ISD::AND:
case ISD::OR:
case ISD::XOR: {
@@ -2387,10 +2103,12 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, dl, CstVT);
SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
if (ShlVal == 1)
- return CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
- SDValue(New, 0));
- return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
- getI8Imm(ShlVal, dl));
+ CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
+ SDValue(New, 0));
+ else
+ CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
+ getI8Imm(ShlVal, dl));
+ return;
}
case X86ISD::UMUL8:
case X86ISD::SMUL8: {
@@ -2406,9 +2124,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Ops[] = {N1, InFlag};
SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
- ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
- ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
- return nullptr;
+ ReplaceNode(Node, CNode);
+ return;
}
case X86ISD::UMUL: {
@@ -2431,10 +2148,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Ops[] = {N1, InFlag};
SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
- ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
- ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
- ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
- return nullptr;
+ ReplaceNode(Node, CNode);
+ return;
}
case ISD::SMUL_LOHI:
@@ -2506,24 +2221,32 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if (foldedLoad) {
SDValue Chain;
+ MachineSDNode *CNode = nullptr;
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
InFlag };
if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
- SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
+ CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
ResHi = SDValue(CNode, 0);
ResLo = SDValue(CNode, 1);
Chain = SDValue(CNode, 2);
InFlag = SDValue(CNode, 3);
} else {
SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
- SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
+ CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
Chain = SDValue(CNode, 0);
InFlag = SDValue(CNode, 1);
}
// Update the chain.
ReplaceUses(N1.getValue(1), Chain);
+ // Record the mem-refs
+ LoadSDNode *LoadNode = cast<LoadSDNode>(N1);
+ if (LoadNode) {
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LoadNode->getMemOperand();
+ CNode->setMemRefs(MemOp, MemOp + 1);
+ }
} else {
SDValue Ops[] = { N1, InFlag };
if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
@@ -2583,7 +2306,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
}
- return nullptr;
+ return;
}
case ISD::SDIVREM:
@@ -2767,7 +2490,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(Node, 1), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
}
- return nullptr;
+ return;
}
case X86ISD::CMP:
@@ -2825,7 +2548,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return nullptr;
+ return;
}
// For example, "testl %eax, $2048" to "testb %ah, $8".
@@ -2862,7 +2585,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return nullptr;
+ return;
}
// For example, "testl %eax, $32776" to "testw %ax, $32776".
@@ -2885,7 +2608,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return nullptr;
+ return;
}
// For example, "testq %rax, $268468232" to "testl %eax, $268468232".
@@ -2908,7 +2631,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return nullptr;
+ return;
}
}
break;
@@ -2959,21 +2682,12 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
-
- return Result;
+ CurDAG->RemoveDeadNode(Node);
+ return;
}
}
- SDNode *ResNode = SelectCode(Node);
-
- DEBUG(dbgs() << "=> ";
- if (ResNode == nullptr || ResNode == Node)
- Node->dump(CurDAG);
- else
- ResNode->dump(CurDAG);
- dbgs() << '\n');
-
- return ResNode;
+ SelectCode(Node);
}
bool X86DAGToDAGISel::
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index dd9966f9e1791..e547111959008 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -71,9 +71,10 @@ static cl::opt<bool> ExperimentalVectorWideningLegalization(
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
const X86Subtarget &STI)
- : TargetLowering(TM), Subtarget(&STI) {
- X86ScalarSSEf64 = Subtarget->hasSSE2();
- X86ScalarSSEf32 = Subtarget->hasSSE1();
+ : TargetLowering(TM), Subtarget(STI) {
+ bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
+ X86ScalarSSEf64 = Subtarget.hasSSE2();
+ X86ScalarSSEf32 = Subtarget.hasSSE1();
MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
// Set up the TargetLowering object.
@@ -86,24 +87,24 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// For 64-bit, since we have so many registers, use the ILP scheduler.
// For 32-bit, use the register pressure specific scheduling.
// For Atom, always use ILP scheduling.
- if (Subtarget->isAtom())
+ if (Subtarget.isAtom())
setSchedulingPreference(Sched::ILP);
- else if (Subtarget->is64Bit())
+ else if (Subtarget.is64Bit())
setSchedulingPreference(Sched::ILP);
else
setSchedulingPreference(Sched::RegPressure);
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
// Bypass expensive divides on Atom when compiling with O2.
if (TM.getOptLevel() >= CodeGenOpt::Default) {
- if (Subtarget->hasSlowDivide32())
+ if (Subtarget.hasSlowDivide32())
addBypassSlowDiv(32, 8);
- if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
+ if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
addBypassSlowDiv(64, 16);
}
- if (Subtarget->isTargetKnownWindowsMSVC()) {
+ if (Subtarget.isTargetKnownWindowsMSVC()) {
// Setup Windows compiler runtime calls.
setLibcallName(RTLIB::SDIV_I64, "_alldiv");
setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
@@ -117,11 +118,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
}
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget.isTargetDarwin()) {
// Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmp(false);
setUseUnderscoreLongJmp(false);
- } else if (Subtarget->isTargetWindowsGNU()) {
+ } else if (Subtarget.isTargetWindowsGNU()) {
// MS runtime is weird: it exports _setjmp, but longjmp!
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(false);
@@ -134,7 +135,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
addRegisterClass(MVT::i8, &X86::GR8RegClass);
addRegisterClass(MVT::i16, &X86::GR16RegClass);
addRegisterClass(MVT::i32, &X86::GR32RegClass);
- if (Subtarget->is64Bit())
+ if (Subtarget.is64Bit())
addRegisterClass(MVT::i64, &X86::GR64RegClass);
for (MVT VT : MVT::integer_valuetypes())
@@ -164,14 +165,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
- if (Subtarget->is64Bit()) {
- if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512())
+ if (Subtarget.is64Bit()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
// f32/f64 are legal, f80 is custom.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
else
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
- } else if (!Subtarget->useSoftFloat()) {
+ } else if (!Subtarget.useSoftFloat()) {
// We have an algorithm for SSE2->double, and we turn this into a
// 64-bit FILD followed by conditional FADD for other targets.
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
@@ -185,8 +186,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
- if (!Subtarget->useSoftFloat()) {
- // SSE has no i16 to fp conversion, only i32
+ if (!Subtarget.useSoftFloat()) {
+ // SSE has no i16 to fp conversion, only i32.
if (X86ScalarSSEf32) {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
// f32 and f64 cases are Legal, f80 case is not
@@ -205,7 +206,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
- if (!Subtarget->useSoftFloat()) {
+ if (!Subtarget.useSoftFloat()) {
// In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
// are Legal, f80 is custom lowered.
setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
@@ -231,8 +232,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
- if (Subtarget->is64Bit()) {
- if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512()) {
+ if (Subtarget.is64Bit()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
// FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
@@ -240,9 +241,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
}
- } else if (!Subtarget->useSoftFloat()) {
+ } else if (!Subtarget.useSoftFloat()) {
// Since AVX is a superset of SSE3, only check for SSE here.
- if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
+ if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
// Expand FP_TO_UINT into a select.
// FIXME: We would like to use a Custom expander here eventually to do
// the optimal thing for SSE vs. the default expansion in the legalizer.
@@ -260,12 +261,12 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (!X86ScalarSSEf64) {
setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
// Without SSE, i64->f64 goes through memory.
setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
}
- } else if (!Subtarget->is64Bit())
+ } else if (!Subtarget.is64Bit())
setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
// Scalar integer divide and remainder are lowered to use operations that
@@ -295,72 +296,43 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
setOperationAction(ISD::BRCOND , MVT::Other, Custom);
- setOperationAction(ISD::BR_CC , MVT::f32, Expand);
- setOperationAction(ISD::BR_CC , MVT::f64, Expand);
- setOperationAction(ISD::BR_CC , MVT::f80, Expand);
- setOperationAction(ISD::BR_CC , MVT::f128, Expand);
- setOperationAction(ISD::BR_CC , MVT::i8, Expand);
- setOperationAction(ISD::BR_CC , MVT::i16, Expand);
- setOperationAction(ISD::BR_CC , MVT::i32, Expand);
- setOperationAction(ISD::BR_CC , MVT::i64, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::f32, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::f64, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::f80, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::f128, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::i8, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::i16, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::i32, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::i64, Expand);
- if (Subtarget->is64Bit())
+ for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
+ MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
+ setOperationAction(ISD::BR_CC, VT, Expand);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+ }
+ if (Subtarget.is64Bit())
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
- if (Subtarget->is32Bit() && Subtarget->isTargetKnownWindowsMSVC()) {
- // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
- // is. We should promote the value to 64-bits to solve this.
- // This is what the CRT headers do - `fmodf` is an inline header
- // function casting to f64 and calling `fmod`.
- setOperationAction(ISD::FREM , MVT::f32 , Promote);
- } else {
- setOperationAction(ISD::FREM , MVT::f32 , Expand);
- }
-
+ setOperationAction(ISD::FREM , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::FREM , MVT::f80 , Expand);
setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
// Promote the i8 variants and force them on up to i32 which has a shorter
// encoding.
- setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
- AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote);
- AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32);
- if (Subtarget->hasBMI()) {
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand);
- if (Subtarget->is64Bit())
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
- } else {
+ setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
+ setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
+ if (!Subtarget.hasBMI()) {
setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
- if (Subtarget->is64Bit())
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
+ if (Subtarget.is64Bit()) {
setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
+ }
}
- if (Subtarget->hasLZCNT()) {
+ if (Subtarget.hasLZCNT()) {
// When promoting the i8 variants, force them to i32 for a shorter
// encoding.
- setOperationAction(ISD::CTLZ , MVT::i8 , Promote);
- AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote);
- AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand);
- if (Subtarget->is64Bit())
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
+ setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
} else {
setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
@@ -368,7 +340,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
}
@@ -377,7 +349,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// Special handling for half-precision floating point conversions.
// If we don't have F16C support, then lower half float conversions
// into library calls.
- if (Subtarget->useSoftFloat() || !Subtarget->hasF16C()) {
+ if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
}
@@ -395,45 +367,34 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f80, MVT::f16, Expand);
- if (Subtarget->hasPOPCNT()) {
+ if (Subtarget.hasPOPCNT()) {
setOperationAction(ISD::CTPOP , MVT::i8 , Promote);
} else {
setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
- if (Subtarget->is64Bit())
+ if (Subtarget.is64Bit())
setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
}
setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
- if (!Subtarget->hasMOVBE())
+ if (!Subtarget.hasMOVBE())
setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
// These should be promoted to a larger select which is supported.
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
// X86 wants to expand cmov itself.
- setOperationAction(ISD::SELECT , MVT::i8 , Custom);
- setOperationAction(ISD::SELECT , MVT::i16 , Custom);
- setOperationAction(ISD::SELECT , MVT::i32 , Custom);
- setOperationAction(ISD::SELECT , MVT::f32 , Custom);
- setOperationAction(ISD::SELECT , MVT::f64 , Custom);
- setOperationAction(ISD::SELECT , MVT::f80 , Custom);
- setOperationAction(ISD::SELECT , MVT::f128 , Custom);
- setOperationAction(ISD::SETCC , MVT::i8 , Custom);
- setOperationAction(ISD::SETCC , MVT::i16 , Custom);
- setOperationAction(ISD::SETCC , MVT::i32 , Custom);
- setOperationAction(ISD::SETCC , MVT::f32 , Custom);
- setOperationAction(ISD::SETCC , MVT::f64 , Custom);
- setOperationAction(ISD::SETCC , MVT::f80 , Custom);
- setOperationAction(ISD::SETCC , MVT::f128 , Custom);
- setOperationAction(ISD::SETCCE , MVT::i8 , Custom);
- setOperationAction(ISD::SETCCE , MVT::i16 , Custom);
- setOperationAction(ISD::SETCCE , MVT::i32 , Custom);
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::SELECT , MVT::i64 , Custom);
- setOperationAction(ISD::SETCC , MVT::i64 , Custom);
- setOperationAction(ISD::SETCCE , MVT::i64 , Custom);
+ for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ }
+ for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
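+ // i64 is only a legal scalar type on 64-bit targets; elsewhere the
+ // legalizer splits it, so skip the i64 overrides on 32-bit x86.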
+ if (VT == MVT::i64 && !Subtarget.is64Bit())
+ continue;
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ setOperationAction(ISD::SETCCE, VT, Custom);
}
setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
// NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
@@ -444,34 +405,31 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// LLVM/Clang supports zero-cost DWARF exception handling.
setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
+ setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
+ if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
+ setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
// Darwin ABI issue.
- setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
- setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
- setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
- setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
- if (Subtarget->is64Bit())
- setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
- setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
- setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
- setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
- setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
- setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
- setOperationAction(ISD::BlockAddress , MVT::i64 , Custom);
+ for (auto VT : { MVT::i32, MVT::i64 }) {
+ if (VT == MVT::i64 && !Subtarget.is64Bit())
+ continue;
+ setOperationAction(ISD::ConstantPool , VT, Custom);
+ setOperationAction(ISD::JumpTable , VT, Custom);
+ setOperationAction(ISD::GlobalAddress , VT, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
+ setOperationAction(ISD::ExternalSymbol , VT, Custom);
+ setOperationAction(ISD::BlockAddress , VT, Custom);
}
// 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
- setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
- setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
- setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
- setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
- setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
+ for (auto VT : { MVT::i32, MVT::i64 }) {
+ if (VT == MVT::i64 && !Subtarget.is64Bit())
+ continue;
+ setOperationAction(ISD::SHL_PARTS, VT, Custom);
+ setOperationAction(ISD::SRA_PARTS, VT, Custom);
+ setOperationAction(ISD::SRL_PARTS, VT, Custom);
}
- if (Subtarget->hasSSE1())
+ if (Subtarget.hasSSE1())
setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
@@ -480,16 +438,21 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
}
- if (Subtarget->hasCmpxchg16b()) {
+ if (Subtarget.hasCmpxchg16b()) {
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
}
// FIXME - use subtarget debug flags
- if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
- !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
+ if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
+ !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
+ TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
}
@@ -505,14 +468,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::VAARG , MVT::Other, Custom);
- setOperationAction(ISD::VACOPY , MVT::Other, Custom);
- } else {
- // TargetInfo::CharPtrBuiltinVaList
- setOperationAction(ISD::VAARG , MVT::Other, Expand);
- setOperationAction(ISD::VACOPY , MVT::Other, Expand);
- }
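+ // The 32-bit va_list is a plain pointer (TargetInfo::CharPtrBuiltinVaList),
+ // so the default expansion suffices there; the 64-bit va_list needs
+ // custom lowering.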
+ bool Is64Bit = Subtarget.is64Bit();
+ setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
+ setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
@@ -523,41 +481,37 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
- if (!Subtarget->useSoftFloat() && X86ScalarSSEf64) {
+ if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
// f32 and f64 use SSE.
// Set up the FP register classes.
addRegisterClass(MVT::f32, &X86::FR32RegClass);
addRegisterClass(MVT::f64, &X86::FR64RegClass);
- // Use ANDPD to simulate FABS.
- setOperationAction(ISD::FABS , MVT::f64, Custom);
- setOperationAction(ISD::FABS , MVT::f32, Custom);
+ for (auto VT : { MVT::f32, MVT::f64 }) {
+ // Use ANDPD to simulate FABS.
+ setOperationAction(ISD::FABS, VT, Custom);
- // Use XORP to simulate FNEG.
- setOperationAction(ISD::FNEG , MVT::f64, Custom);
- setOperationAction(ISD::FNEG , MVT::f32, Custom);
+ // Use XORP to simulate FNEG.
+ setOperationAction(ISD::FNEG, VT, Custom);
- // Use ANDPD and ORPD to simulate FCOPYSIGN.
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
+ // Use ANDPD and ORPD to simulate FCOPYSIGN.
+ setOperationAction(ISD::FCOPYSIGN, VT, Custom);
+
+ // We don't support sin/cos/fmod
+ setOperationAction(ISD::FSIN , VT, Expand);
+ setOperationAction(ISD::FCOS , VT, Expand);
+ setOperationAction(ISD::FSINCOS, VT, Expand);
+ }
- // Lower this to FGETSIGNx86 plus an AND.
+ // Lower this to MOVMSK plus an AND.
setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
- // We don't support sin/cos/fmod
- setOperationAction(ISD::FSIN , MVT::f64, Expand);
- setOperationAction(ISD::FCOS , MVT::f64, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
- setOperationAction(ISD::FSIN , MVT::f32, Expand);
- setOperationAction(ISD::FCOS , MVT::f32, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
-
// Expand FP immediates into loads from the stack, except for the special
// cases we handle.
addLegalFPImmediate(APFloat(+0.0)); // xorpd
addLegalFPImmediate(APFloat(+0.0f)); // xorps
- } else if (!Subtarget->useSoftFloat() && X86ScalarSSEf32) {
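+ // (UseX87 is presumably computed earlier in the constructor as
+ // !Subtarget.useSoftFloat() && Subtarget.hasX87().)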
+ } else if (UseX87 && X86ScalarSSEf32) {
// Use SSE for f32, x87 for f64.
// Set up the FP register classes.
addRegisterClass(MVT::f32, &X86::FR32RegClass);
@@ -592,24 +546,21 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
}
- } else if (!Subtarget->useSoftFloat()) {
+ } else if (UseX87) {
// f32 and f64 in x87.
// Set up the FP register classes.
addRegisterClass(MVT::f64, &X86::RFP64RegClass);
addRegisterClass(MVT::f32, &X86::RFP32RegClass);
- setOperationAction(ISD::UNDEF, MVT::f64, Expand);
- setOperationAction(ISD::UNDEF, MVT::f32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ for (auto VT : { MVT::f32, MVT::f64 }) {
+ setOperationAction(ISD::UNDEF, VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, VT, Expand);
- if (!TM.Options.UnsafeFPMath) {
- setOperationAction(ISD::FSIN , MVT::f64, Expand);
- setOperationAction(ISD::FSIN , MVT::f32, Expand);
- setOperationAction(ISD::FCOS , MVT::f64, Expand);
- setOperationAction(ISD::FCOS , MVT::f32, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
+ if (!TM.Options.UnsafeFPMath) {
+ setOperationAction(ISD::FSIN , VT, Expand);
+ setOperationAction(ISD::FCOS , VT, Expand);
+ setOperationAction(ISD::FSINCOS, VT, Expand);
+ }
}
addLegalFPImmediate(APFloat(+0.0)); // FLD0
addLegalFPImmediate(APFloat(+1.0)); // FLD1
@@ -626,8 +577,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMA, MVT::f32, Expand);
// Long double always uses X87, except f128 in MMX.
- if (!Subtarget->useSoftFloat()) {
- if (Subtarget->is64Bit() && Subtarget->hasMMX()) {
+ if (UseX87) {
+ if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
addRegisterClass(MVT::f128, &X86::FR128RegClass);
ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
setOperationAction(ISD::FABS , MVT::f128, Custom);
@@ -680,38 +631,36 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
+ // Some FP actions are always expanded for vector types.
+ for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
+ MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
+ setOperationAction(ISD::FSIN, VT, Expand);
+ setOperationAction(ISD::FSINCOS, VT, Expand);
+ setOperationAction(ISD::FCOS, VT, Expand);
+ setOperationAction(ISD::FREM, VT, Expand);
+ setOperationAction(ISD::FPOWI, VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, VT, Expand);
+ setOperationAction(ISD::FPOW, VT, Expand);
+ setOperationAction(ISD::FLOG, VT, Expand);
+ setOperationAction(ISD::FLOG2, VT, Expand);
+ setOperationAction(ISD::FLOG10, VT, Expand);
+ setOperationAction(ISD::FEXP, VT, Expand);
+ setOperationAction(ISD::FEXP2, VT, Expand);
+ }
+
// First set operation action for all vector types to either promote
// (for widening) or expand (for scalarization). Then we will selectively
// turn on ones that can be effectively codegen'd.
for (MVT VT : MVT::vector_valuetypes()) {
- setOperationAction(ISD::ADD , VT, Expand);
- setOperationAction(ISD::SUB , VT, Expand);
- setOperationAction(ISD::FADD, VT, Expand);
- setOperationAction(ISD::FNEG, VT, Expand);
- setOperationAction(ISD::FSUB, VT, Expand);
- setOperationAction(ISD::MUL , VT, Expand);
- setOperationAction(ISD::FMUL, VT, Expand);
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
- setOperationAction(ISD::FDIV, VT, Expand);
setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
- setOperationAction(ISD::LOAD, VT, Expand);
- setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
- setOperationAction(ISD::FABS, VT, Expand);
- setOperationAction(ISD::FSIN, VT, Expand);
- setOperationAction(ISD::FSINCOS, VT, Expand);
- setOperationAction(ISD::FCOS, VT, Expand);
- setOperationAction(ISD::FSINCOS, VT, Expand);
- setOperationAction(ISD::FREM, VT, Expand);
setOperationAction(ISD::FMA, VT, Expand);
- setOperationAction(ISD::FPOWI, VT, Expand);
- setOperationAction(ISD::FSQRT, VT, Expand);
- setOperationAction(ISD::FCOPYSIGN, VT, Expand);
setOperationAction(ISD::FFLOOR, VT, Expand);
setOperationAction(ISD::FCEIL, VT, Expand);
setOperationAction(ISD::FTRUNC, VT, Expand);
@@ -723,24 +672,13 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::MULHU, VT, Expand);
setOperationAction(ISD::SDIVREM, VT, Expand);
setOperationAction(ISD::UDIVREM, VT, Expand);
- setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::CTPOP, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTLZ, VT, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
- setOperationAction(ISD::SHL, VT, Expand);
- setOperationAction(ISD::SRA, VT, Expand);
- setOperationAction(ISD::SRL, VT, Expand);
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
setOperationAction(ISD::SETCC, VT, Expand);
- setOperationAction(ISD::FLOG, VT, Expand);
- setOperationAction(ISD::FLOG2, VT, Expand);
- setOperationAction(ISD::FLOG10, VT, Expand);
- setOperationAction(ISD::FEXP, VT, Expand);
- setOperationAction(ISD::FEXP2, VT, Expand);
setOperationAction(ISD::FP_TO_UINT, VT, Expand);
setOperationAction(ISD::FP_TO_SINT, VT, Expand);
setOperationAction(ISD::UINT_TO_FP, VT, Expand);
@@ -750,7 +688,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
setOperationAction(ISD::ANY_EXTEND, VT, Expand);
- setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
for (MVT InnerVT : MVT::vector_valuetypes()) {
setTruncStoreAction(InnerVT, VT, Expand);
@@ -774,35 +711,16 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
// with -msoft-float, disable use of MMX as well.
- if (!Subtarget->useSoftFloat() && Subtarget->hasMMX()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
// No operations on x86mmx supported, everything uses intrinsics.
}
- // MMX-sized vectors (other than x86mmx) are expected to be expanded
- // into smaller operations.
- for (MVT MMXTy : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v1i64}) {
- setOperationAction(ISD::MULHS, MMXTy, Expand);
- setOperationAction(ISD::AND, MMXTy, Expand);
- setOperationAction(ISD::OR, MMXTy, Expand);
- setOperationAction(ISD::XOR, MMXTy, Expand);
- setOperationAction(ISD::SCALAR_TO_VECTOR, MMXTy, Expand);
- setOperationAction(ISD::SELECT, MMXTy, Expand);
- setOperationAction(ISD::BITCAST, MMXTy, Expand);
- }
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
-
- if (!Subtarget->useSoftFloat() && Subtarget->hasSSE1()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
- setOperationAction(ISD::FADD, MVT::v4f32, Legal);
- setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
- setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
- setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
- setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
setOperationAction(ISD::FABS, MVT::v4f32, Custom);
- setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
@@ -811,7 +729,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
}
- if (!Subtarget->useSoftFloat() && Subtarget->hasSSE2()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
// FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
@@ -821,27 +739,16 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
- setOperationAction(ISD::ADD, MVT::v16i8, Legal);
- setOperationAction(ISD::ADD, MVT::v8i16, Legal);
- setOperationAction(ISD::ADD, MVT::v4i32, Legal);
- setOperationAction(ISD::ADD, MVT::v2i64, Legal);
setOperationAction(ISD::MUL, MVT::v16i8, Custom);
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
setOperationAction(ISD::MUL, MVT::v2i64, Custom);
setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
+ setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
+ setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
- setOperationAction(ISD::SUB, MVT::v16i8, Legal);
- setOperationAction(ISD::SUB, MVT::v8i16, Legal);
- setOperationAction(ISD::SUB, MVT::v4i32, Legal);
- setOperationAction(ISD::SUB, MVT::v2i64, Legal);
setOperationAction(ISD::MUL, MVT::v8i16, Legal);
- setOperationAction(ISD::FADD, MVT::v2f64, Legal);
- setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
- setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
- setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
- setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
setOperationAction(ISD::FABS, MVT::v2f64, Custom);
@@ -870,10 +777,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
// ISD::CTTZ v2i64 - scalarization is faster.
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
- // ISD::CTTZ_ZERO_UNDEF v2i64 - scalarization is faster.
// Custom lower build_vector, vector_shuffle, and extract_vector_elt.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
@@ -899,37 +802,28 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
}
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
- setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
- setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
+ for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Custom);
+
+ if (VT == MVT::v2i64 && !Subtarget.is64Bit())
+ continue;
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
// Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
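// All of these operate on whole 128-bit lanes regardless of element
// width, so one set of v2i64 patterns covers every element type.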
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
- setOperationAction(ISD::AND, VT, Promote);
- AddPromotedToType (ISD::AND, VT, MVT::v2i64);
- setOperationAction(ISD::OR, VT, Promote);
- AddPromotedToType (ISD::OR, VT, MVT::v2i64);
- setOperationAction(ISD::XOR, VT, Promote);
- AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
- setOperationAction(ISD::LOAD, VT, Promote);
- AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
- setOperationAction(ISD::SELECT, VT, Promote);
- AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::AND, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::OR, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::XOR, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::SELECT, VT, MVT::v2i64);
}
// Custom lower v2i64 and v2f64 selects.
- setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
- setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
@@ -942,7 +836,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
// As there is no 64-bit GPR available, we need to build a special custom
// sequence to convert from v2i32 to v2f32.
- if (!Subtarget->is64Bit())
+ if (!Subtarget.is64Bit())
setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
@@ -954,9 +848,35 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
+
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
+
+ for (auto VT : { MVT::v8i16, MVT::v16i8 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ }
+
+ // In the customized shift lowering, the legal cases in AVX2 will be
+ // recognized.
+ for (auto VT : { MVT::v4i32, MVT::v2i64 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ }
+ }
+
+ if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
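+ // SSSE3's PSHUFB makes a nibble-lookup lowering possible for BITREVERSE
+ // and for the byte/word CTLZ cases below.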
+ setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
+ // ISD::CTLZ v4i32 - scalarization is faster.
+ // ISD::CTLZ v2i64 - scalarization is faster.
}
- if (!Subtarget->useSoftFloat() && Subtarget->hasSSE41()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
setOperationAction(ISD::FCEIL, RoundedTy, Legal);
@@ -1004,66 +924,28 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
- // i8 and i16 vectors are custom because the source register and source
- // source memory operand types are not the same width. f32 vectors are
- // custom since the immediate controlling the insert encodes additional
- // information.
+ // i8 vectors are custom because the source register and source memory
+ // operand types are not the same width.
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
-
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
-
- // FIXME: these should be Legal, but that's only for the case where
- // the index is constant. For now custom expand to deal with that.
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
- }
}
- if (Subtarget->hasSSE2()) {
- setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
- setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
- setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
-
- setOperationAction(ISD::SRL, MVT::v8i16, Custom);
- setOperationAction(ISD::SRL, MVT::v16i8, Custom);
+ if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
+ for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
+ MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
+ setOperationAction(ISD::ROTL, VT, Custom);
- setOperationAction(ISD::SHL, MVT::v8i16, Custom);
- setOperationAction(ISD::SHL, MVT::v16i8, Custom);
-
- setOperationAction(ISD::SRA, MVT::v8i16, Custom);
- setOperationAction(ISD::SRA, MVT::v16i8, Custom);
-
- // In the customized shift lowering, the legal cases in AVX2 will be
- // recognized.
- setOperationAction(ISD::SRL, MVT::v2i64, Custom);
- setOperationAction(ISD::SRL, MVT::v4i32, Custom);
+ // XOP can efficiently perform BITREVERSE with VPPERM.
+ for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
+ setOperationAction(ISD::BITREVERSE, VT, Custom);
- setOperationAction(ISD::SHL, MVT::v2i64, Custom);
- setOperationAction(ISD::SHL, MVT::v4i32, Custom);
-
- setOperationAction(ISD::SRA, MVT::v2i64, Custom);
- setOperationAction(ISD::SRA, MVT::v4i32, Custom);
+ for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
+ MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
+ setOperationAction(ISD::BITREVERSE, VT, Custom);
}
- if (Subtarget->hasXOP()) {
- setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
- setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
- setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
- setOperationAction(ISD::ROTL, MVT::v2i64, Custom);
- setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
- setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
- setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
- setOperationAction(ISD::ROTL, MVT::v4i64, Custom);
- }
+ if (!Subtarget.useSoftFloat() && Subtarget.hasFp256()) {
+ bool HasInt256 = Subtarget.hasInt256();
- if (!Subtarget->useSoftFloat() && Subtarget->hasFp256()) {
addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
@@ -1071,35 +953,15 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
addRegisterClass(MVT::v4f64, &X86::VR256RegClass);
- setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
- setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
- setOperationAction(ISD::LOAD, MVT::v4i64, Legal);
-
- setOperationAction(ISD::FADD, MVT::v8f32, Legal);
- setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
- setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
- setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
- setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
- setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
- setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
- setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
- setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
- setOperationAction(ISD::FABS, MVT::v8f32, Custom);
-
- setOperationAction(ISD::FADD, MVT::v4f64, Legal);
- setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
- setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
- setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
- setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
- setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
- setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
- setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
- setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
- setOperationAction(ISD::FABS, MVT::v4f64, Custom);
+ for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
+ setOperationAction(ISD::FFLOOR, VT, Legal);
+ setOperationAction(ISD::FCEIL, VT, Legal);
+ setOperationAction(ISD::FTRUNC, VT, Legal);
+ setOperationAction(ISD::FRINT, VT, Legal);
+ setOperationAction(ISD::FNEARBYINT, VT, Legal);
+ setOperationAction(ISD::FNEG, VT, Custom);
+ setOperationAction(ISD::FABS, VT, Custom);
+ }
// (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
// even though v8i16 is a legal type.
@@ -1117,14 +979,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
- setOperationAction(ISD::SRL, MVT::v16i16, Custom);
- setOperationAction(ISD::SRL, MVT::v32i8, Custom);
-
- setOperationAction(ISD::SHL, MVT::v16i16, Custom);
- setOperationAction(ISD::SHL, MVT::v32i8, Custom);
-
- setOperationAction(ISD::SRA, MVT::v16i16, Custom);
- setOperationAction(ISD::SRA, MVT::v32i8, Custom);
+ for (auto VT : { MVT::v32i8, MVT::v16i16 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ }
setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
@@ -1147,63 +1006,57 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
+ setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
+
+ for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
+ setOperationAction(ISD::CTPOP, VT, Custom);
+ setOperationAction(ISD::CTTZ, VT, Custom);
+ }
+
+ // ISD::CTLZ v8i32/v4i64 - scalarization is faster without AVX2
+ // as we end up splitting the 256-bit vectors.
+ for (auto VT : { MVT::v32i8, MVT::v16i16 })
+ setOperationAction(ISD::CTLZ, VT, Custom);
+
+ if (HasInt256)
+ for (auto VT : { MVT::v8i32, MVT::v4i64 })
+ setOperationAction(ISD::CTLZ, VT, Custom);
- setOperationAction(ISD::CTPOP, MVT::v32i8, Custom);
- setOperationAction(ISD::CTPOP, MVT::v16i16, Custom);
- setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
- setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
-
- setOperationAction(ISD::CTTZ, MVT::v32i8, Custom);
- setOperationAction(ISD::CTTZ, MVT::v16i16, Custom);
- setOperationAction(ISD::CTTZ, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ, MVT::v4i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v32i8, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i16, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
-
- if (Subtarget->hasAnyFMA()) {
- setOperationAction(ISD::FMA, MVT::v8f32, Legal);
- setOperationAction(ISD::FMA, MVT::v4f64, Legal);
- setOperationAction(ISD::FMA, MVT::v4f32, Legal);
- setOperationAction(ISD::FMA, MVT::v2f64, Legal);
- setOperationAction(ISD::FMA, MVT::f32, Legal);
- setOperationAction(ISD::FMA, MVT::f64, Legal);
- }
-
- if (Subtarget->hasInt256()) {
- setOperationAction(ISD::ADD, MVT::v4i64, Legal);
- setOperationAction(ISD::ADD, MVT::v8i32, Legal);
- setOperationAction(ISD::ADD, MVT::v16i16, Legal);
- setOperationAction(ISD::ADD, MVT::v32i8, Legal);
-
- setOperationAction(ISD::SUB, MVT::v4i64, Legal);
- setOperationAction(ISD::SUB, MVT::v8i32, Legal);
- setOperationAction(ISD::SUB, MVT::v16i16, Legal);
- setOperationAction(ISD::SUB, MVT::v32i8, Legal);
-
- setOperationAction(ISD::MUL, MVT::v4i64, Custom);
- setOperationAction(ISD::MUL, MVT::v8i32, Legal);
- setOperationAction(ISD::MUL, MVT::v16i16, Legal);
- setOperationAction(ISD::MUL, MVT::v32i8, Custom);
-
- setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
- setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
- setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
- setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
-
- setOperationAction(ISD::SMAX, MVT::v32i8, Legal);
- setOperationAction(ISD::SMAX, MVT::v16i16, Legal);
- setOperationAction(ISD::SMAX, MVT::v8i32, Legal);
- setOperationAction(ISD::UMAX, MVT::v32i8, Legal);
- setOperationAction(ISD::UMAX, MVT::v16i16, Legal);
- setOperationAction(ISD::UMAX, MVT::v8i32, Legal);
- setOperationAction(ISD::SMIN, MVT::v32i8, Legal);
- setOperationAction(ISD::SMIN, MVT::v16i16, Legal);
- setOperationAction(ISD::SMIN, MVT::v8i32, Legal);
- setOperationAction(ISD::UMIN, MVT::v32i8, Legal);
- setOperationAction(ISD::UMIN, MVT::v16i16, Legal);
- setOperationAction(ISD::UMIN, MVT::v8i32, Legal);
+ if (Subtarget.hasAnyFMA()) {
+ for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
+ MVT::v2f64, MVT::v4f64 })
+ setOperationAction(ISD::FMA, VT, Legal);
+ }
+
+ for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
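+ // Without AVX2 these 256-bit integer ops are split into two 128-bit
+ // halves (Custom); with AVX2 they are single instructions (Legal).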
+ setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
+ }
+
+ setOperationAction(ISD::MUL, MVT::v4i64, Custom);
+ setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::MUL, MVT::v32i8, Custom);
+
+ setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
+ setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
+
+ setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
+ setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
+
+ for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
+ setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
+ }
+
+ if (HasInt256) {
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i32, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v16i16, Custom);
// The custom lowering for UINT_TO_FP for v8i32 becomes interesting
// when we have a 256-bit-wide blend with immediate.
@@ -1223,62 +1076,32 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
- } else {
- setOperationAction(ISD::ADD, MVT::v4i64, Custom);
- setOperationAction(ISD::ADD, MVT::v8i32, Custom);
- setOperationAction(ISD::ADD, MVT::v16i16, Custom);
- setOperationAction(ISD::ADD, MVT::v32i8, Custom);
-
- setOperationAction(ISD::SUB, MVT::v4i64, Custom);
- setOperationAction(ISD::SUB, MVT::v8i32, Custom);
- setOperationAction(ISD::SUB, MVT::v16i16, Custom);
- setOperationAction(ISD::SUB, MVT::v32i8, Custom);
-
- setOperationAction(ISD::MUL, MVT::v4i64, Custom);
- setOperationAction(ISD::MUL, MVT::v8i32, Custom);
- setOperationAction(ISD::MUL, MVT::v16i16, Custom);
- setOperationAction(ISD::MUL, MVT::v32i8, Custom);
-
- setOperationAction(ISD::SMAX, MVT::v32i8, Custom);
- setOperationAction(ISD::SMAX, MVT::v16i16, Custom);
- setOperationAction(ISD::SMAX, MVT::v8i32, Custom);
- setOperationAction(ISD::UMAX, MVT::v32i8, Custom);
- setOperationAction(ISD::UMAX, MVT::v16i16, Custom);
- setOperationAction(ISD::UMAX, MVT::v8i32, Custom);
- setOperationAction(ISD::SMIN, MVT::v32i8, Custom);
- setOperationAction(ISD::SMIN, MVT::v16i16, Custom);
- setOperationAction(ISD::SMIN, MVT::v8i32, Custom);
- setOperationAction(ISD::UMIN, MVT::v32i8, Custom);
- setOperationAction(ISD::UMIN, MVT::v16i16, Custom);
- setOperationAction(ISD::UMIN, MVT::v8i32, Custom);
}
// In the customized shift lowering, the legal cases in AVX2 will be
// recognized.
- setOperationAction(ISD::SRL, MVT::v4i64, Custom);
- setOperationAction(ISD::SRL, MVT::v8i32, Custom);
+ for (auto VT : { MVT::v8i32, MVT::v4i64 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ }
- setOperationAction(ISD::SHL, MVT::v4i64, Custom);
- setOperationAction(ISD::SHL, MVT::v8i32, Custom);
+ for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
+ MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
+ setOperationAction(ISD::MLOAD, VT, Legal);
+ setOperationAction(ISD::MSTORE, VT, Legal);
+ }
- setOperationAction(ISD::SRA, MVT::v4i64, Custom);
- setOperationAction(ISD::SRA, MVT::v8i32, Custom);
+ // Extract subvector is special because the value type
+ // (result) is 128-bit but the source is 256-bit wide.
+ for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
+ MVT::v4f32, MVT::v2f64 }) {
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+ }
// Custom lower several nodes for 256-bit types.
- for (MVT VT : MVT::vector_valuetypes()) {
- if (VT.getScalarSizeInBits() >= 32) {
- setOperationAction(ISD::MLOAD, VT, Legal);
- setOperationAction(ISD::MSTORE, VT, Legal);
- }
- // Extract subvector is special because the value type
- // (result) is 128-bit but the source is 256-bit wide.
- if (VT.is128BitVector()) {
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
- }
- // Do not attempt to custom lower other non-256-bit vectors
- if (!VT.is256BitVector())
- continue;
-
+ for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
+ MVT::v8f32, MVT::v4f64 }) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
@@ -1289,25 +1112,20 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
}
- if (Subtarget->hasInt256())
+ if (HasInt256)
setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
// Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
- setOperationAction(ISD::AND, VT, Promote);
- AddPromotedToType (ISD::AND, VT, MVT::v4i64);
- setOperationAction(ISD::OR, VT, Promote);
- AddPromotedToType (ISD::OR, VT, MVT::v4i64);
- setOperationAction(ISD::XOR, VT, Promote);
- AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
- setOperationAction(ISD::LOAD, VT, Promote);
- AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
- setOperationAction(ISD::SELECT, VT, Promote);
- AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::AND, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::OR, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::XOR, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::SELECT, VT, MVT::v4i64);
}
}
- if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
@@ -1320,19 +1138,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i32, MVT::v16i8, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v16i32, MVT::v16i8, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i32, MVT::v16i16, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v16i32, MVT::v16i16, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v32i16, MVT::v32i8, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v32i16, MVT::v32i8, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i8, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i8, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i16, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i16, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);
-
+ for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
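+ // EXTLOAD is included too: an any-extending load can always be
+ // implemented with the zero-extending form.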
+ setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
+ setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
+ setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
+ setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
+ setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
+ setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
+ }
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
setOperationAction(ISD::SETCC, MVT::i1, Custom);
setOperationAction(ISD::SETCCE, MVT::i1, Custom);
@@ -1343,29 +1156,22 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SUB, MVT::i1, Custom);
setOperationAction(ISD::ADD, MVT::i1, Custom);
setOperationAction(ISD::MUL, MVT::i1, Custom);
- setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
- setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
- setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
- setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
- setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
-
- setOperationAction(ISD::FADD, MVT::v16f32, Legal);
- setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
- setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
- setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
- setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
- setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
- setOperationAction(ISD::FABS, MVT::v16f32, Custom);
-
- setOperationAction(ISD::FADD, MVT::v8f64, Legal);
- setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
- setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
- setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
- setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
- setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
- setOperationAction(ISD::FABS, MVT::v8f64, Custom);
- setOperationAction(ISD::FMA, MVT::v8f64, Legal);
- setOperationAction(ISD::FMA, MVT::v16f32, Legal);
+
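+ // i1 mask vectors live in k-registers, so extending loads from and
+ // truncating stores to mask types need custom handling.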
+ for (MVT VT : {MVT::v2i64, MVT::v4i32, MVT::v8i32, MVT::v4i64, MVT::v8i16,
+ MVT::v16i8, MVT::v16i16, MVT::v32i8, MVT::v16i32,
+ MVT::v8i64, MVT::v32i16, MVT::v64i8}) {
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
+ setLoadExtAction(ISD::SEXTLOAD, VT, MaskVT, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MaskVT, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MaskVT, Custom);
+ setTruncStoreAction(VT, MaskVT, Custom);
+ }
+
+ for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
+ setOperationAction(ISD::FNEG, VT, Custom);
+ setOperationAction(ISD::FABS, VT, Custom);
+ setOperationAction(ISD::FMA, VT, Legal);
+ }
setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
@@ -1389,7 +1195,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
- if (Subtarget->hasVLX()){
+ if (Subtarget.hasVLX()) {
setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
@@ -1412,15 +1218,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i1, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i1, Custom);
- if (Subtarget->hasDQI()) {
- setOperationAction(ISD::TRUNCATE, MVT::v2i1, Custom);
- setOperationAction(ISD::TRUNCATE, MVT::v4i1, Custom);
-
+ setOperationAction(ISD::VSELECT, MVT::v8i1, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v16i1, Expand);
+ if (Subtarget.hasDQI()) {
setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
- if (Subtarget->hasVLX()) {
+ if (Subtarget.hasVLX()) {
setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Legal);
@@ -1431,7 +1236,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
}
}
- if (Subtarget->hasVLX()) {
+ if (Subtarget.hasVLX()) {
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
@@ -1440,7 +1245,22 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Custom);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Custom);
+
+ // FIXME: These are available on SSE/AVX2; add the relevant patterns.
+ setLoadExtAction(ISD::EXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
}
+
setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
@@ -1453,20 +1273,17 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
- if (Subtarget->hasDQI()) {
+ if (Subtarget.hasDQI()) {
setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Custom);
}
- setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
- setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
- setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
- setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
- setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
- setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
+ for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
+ setOperationAction(ISD::FFLOOR, VT, Legal);
+ setOperationAction(ISD::FCEIL, VT, Legal);
+ setOperationAction(ISD::FTRUNC, VT, Legal);
+ setOperationAction(ISD::FRINT, VT, Legal);
+ setOperationAction(ISD::FNEARBYINT, VT, Legal);
+ }
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
@@ -1501,139 +1318,115 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UMIN, MVT::v16i32, Legal);
setOperationAction(ISD::UMIN, MVT::v8i64, Legal);
- setOperationAction(ISD::ADD, MVT::v8i64, Legal);
- setOperationAction(ISD::ADD, MVT::v16i32, Legal);
-
- setOperationAction(ISD::SUB, MVT::v8i64, Legal);
- setOperationAction(ISD::SUB, MVT::v16i32, Legal);
+ setOperationAction(ISD::ADD, MVT::v8i1, Expand);
+ setOperationAction(ISD::ADD, MVT::v16i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v8i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v16i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v8i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v16i1, Expand);
setOperationAction(ISD::MUL, MVT::v16i32, Legal);
- setOperationAction(ISD::SRL, MVT::v8i64, Custom);
- setOperationAction(ISD::SRL, MVT::v16i32, Custom);
-
- setOperationAction(ISD::SHL, MVT::v8i64, Custom);
- setOperationAction(ISD::SHL, MVT::v16i32, Custom);
-
- setOperationAction(ISD::SRA, MVT::v8i64, Custom);
- setOperationAction(ISD::SRA, MVT::v16i32, Custom);
-
- setOperationAction(ISD::AND, MVT::v8i64, Legal);
- setOperationAction(ISD::OR, MVT::v8i64, Legal);
- setOperationAction(ISD::XOR, MVT::v8i64, Legal);
- setOperationAction(ISD::AND, MVT::v16i32, Legal);
- setOperationAction(ISD::OR, MVT::v16i32, Legal);
- setOperationAction(ISD::XOR, MVT::v16i32, Legal);
+ for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ setOperationAction(ISD::AND, VT, Legal);
+ setOperationAction(ISD::OR, VT, Legal);
+ setOperationAction(ISD::XOR, VT, Legal);
+ setOperationAction(ISD::CTPOP, VT, Custom);
+ setOperationAction(ISD::CTTZ, VT, Custom);
+ }
- if (Subtarget->hasCDI()) {
+ if (Subtarget.hasCDI()) {
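+ // AVX-512 CD provides VPLZCNTD/VPLZCNTQ, so 512-bit CTLZ is Legal here;
+ // the narrower vectors below are custom-lowered via the 512-bit forms.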
setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i32, Expand);
setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
setOperationAction(ISD::CTLZ, MVT::v16i16, Custom);
setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i16, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i8, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i16, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i8, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i32, Custom);
- if (Subtarget->hasVLX()) {
+ if (Subtarget.hasVLX()) {
setOperationAction(ISD::CTLZ, MVT::v4i64, Legal);
setOperationAction(ISD::CTLZ, MVT::v8i32, Legal);
setOperationAction(ISD::CTLZ, MVT::v2i64, Legal);
setOperationAction(ISD::CTLZ, MVT::v4i32, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Expand);
-
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
} else {
setOperationAction(ISD::CTLZ, MVT::v4i64, Custom);
setOperationAction(ISD::CTLZ, MVT::v8i32, Custom);
setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Expand);
}
- } // Subtarget->hasCDI()
- if (Subtarget->hasDQI()) {
- setOperationAction(ISD::MUL, MVT::v2i64, Legal);
- setOperationAction(ISD::MUL, MVT::v4i64, Legal);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
+ } // Subtarget.hasCDI()
+
+ if (Subtarget.hasDQI()) {
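+ // AVX-512 DQ adds VPMULLQ; its 128/256-bit forms additionally require
+ // VLX, hence the extra guard.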
+ if (Subtarget.hasVLX()) {
+ setOperationAction(ISD::MUL, MVT::v2i64, Legal);
+ setOperationAction(ISD::MUL, MVT::v4i64, Legal);
+ }
setOperationAction(ISD::MUL, MVT::v8i64, Legal);
}
// Custom lower several nodes.
- for (MVT VT : MVT::vector_valuetypes()) {
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- if (EltSize == 1) {
- setOperationAction(ISD::AND, VT, Legal);
- setOperationAction(ISD::OR, VT, Legal);
- setOperationAction(ISD::XOR, VT, Legal);
- }
- if ((VT.is128BitVector() || VT.is256BitVector()) && EltSize >= 32) {
- setOperationAction(ISD::MGATHER, VT, Custom);
- setOperationAction(ISD::MSCATTER, VT, Custom);
- }
- // Extract subvector is special because the value type
- // (result) is 256/128-bit but the source is 512-bit wide.
- if (VT.is128BitVector() || VT.is256BitVector()) {
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
- }
- if (VT.getVectorElementType() == MVT::i1)
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
-
- // Do not attempt to custom lower other non-512-bit vectors
- if (!VT.is512BitVector())
- continue;
-
- if (EltSize >= 32) {
- setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
- setOperationAction(ISD::VSELECT, VT, Legal);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
- setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
- setOperationAction(ISD::MLOAD, VT, Legal);
- setOperationAction(ISD::MSTORE, VT, Legal);
- setOperationAction(ISD::MGATHER, VT, Legal);
- setOperationAction(ISD::MSCATTER, VT, Custom);
- }
+ for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
+ MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
+ setOperationAction(ISD::MGATHER, VT, Custom);
+ setOperationAction(ISD::MSCATTER, VT, Custom);
+ }
+ // Extract subvector is special because the value type
+ // (result) is 256-bit but the source is 512-bit wide.
+ // The 128-bit result cases were already made Custom under AVX1.
+ for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
+ MVT::v8f32, MVT::v4f64 })
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+ for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1,
+ MVT::v16i1, MVT::v32i1, MVT::v64i1 })
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
+
+ for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+ setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
+ setOperationAction(ISD::MLOAD, VT, Legal);
+ setOperationAction(ISD::MSTORE, VT, Legal);
+ setOperationAction(ISD::MGATHER, VT, Legal);
+ setOperationAction(ISD::MSCATTER, VT, Custom);
}
for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
- setOperationAction(ISD::SELECT, VT, Promote);
- AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
+ setOperationPromotedToType(ISD::SELECT, VT, MVT::v8i64);
}
}// has AVX-512
- if (!Subtarget->useSoftFloat() && Subtarget->hasBWI()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
- setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
- setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
+ setOperationAction(ISD::ADD, MVT::v32i1, Expand);
+ setOperationAction(ISD::ADD, MVT::v64i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v32i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v64i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v32i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v64i1, Expand);
+
setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
- setOperationAction(ISD::ADD, MVT::v32i16, Legal);
- setOperationAction(ISD::ADD, MVT::v64i8, Legal);
- setOperationAction(ISD::SUB, MVT::v32i16, Legal);
- setOperationAction(ISD::SUB, MVT::v64i8, Legal);
setOperationAction(ISD::MUL, MVT::v32i16, Legal);
+ setOperationAction(ISD::MUL, MVT::v64i8, Custom);
setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
@@ -1646,12 +1439,15 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
setOperationAction(ISD::SELECT, MVT::v32i1, Custom);
setOperationAction(ISD::SELECT, MVT::v64i1, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
+ setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
@@ -1667,6 +1463,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i1, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i1, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v32i1, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v64i1, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v32i1, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v64i1, Expand);
+ setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
setOperationAction(ISD::SMAX, MVT::v64i8, Legal);
setOperationAction(ISD::SMAX, MVT::v32i16, Legal);
@@ -1679,36 +1480,59 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
- if (Subtarget->hasVLX())
+ if (Subtarget.hasVLX())
setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
- if (Subtarget->hasCDI()) {
+ LegalizeAction Action = Subtarget.hasVLX() ? Legal : Custom;
+ for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
+ setOperationAction(ISD::MLOAD, VT, Action);
+ setOperationAction(ISD::MSTORE, VT, Action);
+ }
+
+ if (Subtarget.hasCDI()) {
setOperationAction(ISD::CTLZ, MVT::v32i16, Custom);
setOperationAction(ISD::CTLZ, MVT::v64i8, Custom);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i16, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v64i8, Expand);
}
for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
- setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
- setOperationAction(ISD::VSELECT, VT, Legal);
- setOperationAction(ISD::SRL, VT, Custom);
- setOperationAction(ISD::SHL, VT, Custom);
- setOperationAction(ISD::SRA, VT, Custom);
-
- setOperationAction(ISD::AND, VT, Promote);
- AddPromotedToType (ISD::AND, VT, MVT::v8i64);
- setOperationAction(ISD::OR, VT, Promote);
- AddPromotedToType (ISD::OR, VT, MVT::v8i64);
- setOperationAction(ISD::XOR, VT, Promote);
- AddPromotedToType (ISD::XOR, VT, MVT::v8i64);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Legal);
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ setOperationAction(ISD::MLOAD, VT, Legal);
+ setOperationAction(ISD::MSTORE, VT, Legal);
+ setOperationAction(ISD::CTPOP, VT, Custom);
+ setOperationAction(ISD::CTTZ, VT, Custom);
+
+ setOperationPromotedToType(ISD::AND, VT, MVT::v8i64);
+ setOperationPromotedToType(ISD::OR, VT, MVT::v8i64);
+ setOperationPromotedToType(ISD::XOR, VT, MVT::v8i64);
+ }
+
+ for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
+ setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
+ if (Subtarget.hasVLX()) {
+ // FIXME: These instructions are available on SSE/AVX2; add the relevant patterns.
+ setLoadExtAction(ExtType, MVT::v16i16, MVT::v16i8, Legal);
+ setLoadExtAction(ExtType, MVT::v8i16, MVT::v8i8, Legal);
+ }
}
}
- if (!Subtarget->useSoftFloat() && Subtarget->hasVLX()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
+ setOperationAction(ISD::ADD, MVT::v2i1, Expand);
+ setOperationAction(ISD::ADD, MVT::v4i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v2i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v4i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v2i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v4i1, Expand);
+
+ setOperationAction(ISD::TRUNCATE, MVT::v2i1, Custom);
+ setOperationAction(ISD::TRUNCATE, MVT::v4i1, Custom);
setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Custom);
@@ -1721,31 +1545,28 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i1, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i1, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v2i1, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v4i1, Expand);
+
+ for (auto VT : { MVT::v4i32, MVT::v8i32 }) {
+ setOperationAction(ISD::AND, VT, Legal);
+ setOperationAction(ISD::OR, VT, Legal);
+ setOperationAction(ISD::XOR, VT, Legal);
+ }
- setOperationAction(ISD::AND, MVT::v8i32, Legal);
- setOperationAction(ISD::OR, MVT::v8i32, Legal);
- setOperationAction(ISD::XOR, MVT::v8i32, Legal);
- setOperationAction(ISD::AND, MVT::v4i32, Legal);
- setOperationAction(ISD::OR, MVT::v4i32, Legal);
- setOperationAction(ISD::XOR, MVT::v4i32, Legal);
- setOperationAction(ISD::SRA, MVT::v2i64, Custom);
- setOperationAction(ISD::SRA, MVT::v4i64, Custom);
-
- setOperationAction(ISD::SMAX, MVT::v2i64, Legal);
- setOperationAction(ISD::SMAX, MVT::v4i64, Legal);
- setOperationAction(ISD::UMAX, MVT::v2i64, Legal);
- setOperationAction(ISD::UMAX, MVT::v4i64, Legal);
- setOperationAction(ISD::SMIN, MVT::v2i64, Legal);
- setOperationAction(ISD::SMIN, MVT::v4i64, Legal);
- setOperationAction(ISD::UMIN, MVT::v2i64, Legal);
- setOperationAction(ISD::UMIN, MVT::v4i64, Legal);
+ for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
+ setOperationAction(ISD::SMAX, VT, Legal);
+ setOperationAction(ISD::UMAX, VT, Legal);
+ setOperationAction(ISD::SMIN, VT, Legal);
+ setOperationAction(ISD::UMIN, VT, Legal);
+ }
}
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
- if (!Subtarget->is64Bit()) {
+ if (!Subtarget.is64Bit()) {
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
}
@@ -1757,7 +1578,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// subtraction on x86-32 once PR3203 is fixed. We really can't do much better
// than generic legalization for 64-bit multiplication-with-overflow, though.
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
- if (VT == MVT::i64 && !Subtarget->is64Bit())
+ if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
// Add/Sub/Mul with overflow operations are custom lowered.
setOperationAction(ISD::SADDO, VT, Custom);
@@ -1768,7 +1589,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UMULO, VT, Custom);
}
- if (!Subtarget->is64Bit()) {
+ if (!Subtarget.is64Bit()) {
// These libcalls are not available in 32-bit.
setLibcallName(RTLIB::SHL_I128, nullptr);
setLibcallName(RTLIB::SRL_I128, nullptr);
@@ -1776,10 +1597,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
// Combine sin / cos into one node or libcall if possible.
- if (Subtarget->hasSinCos()) {
+ if (Subtarget.hasSinCos()) {
setLibcallName(RTLIB::SINCOS_F32, "sincosf");
setLibcallName(RTLIB::SINCOS_F64, "sincos");
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget.isTargetDarwin()) {
// For MacOSX, we don't want the normal expansion of a libcall to sincos.
// We want to issue a libcall to __sincos_stret to avoid memory traffic.
setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
@@ -1787,7 +1608,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
}
- if (Subtarget->isTargetWin64()) {
+ if (Subtarget.isTargetWin64()) {
setOperationAction(ISD::SDIV, MVT::i128, Custom);
setOperationAction(ISD::UDIV, MVT::i128, Custom);
setOperationAction(ISD::SREM, MVT::i128, Custom);
@@ -1796,6 +1617,17 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
}
+ // On 32-bit MSVC, `fmodf(f32)` is not defined; only `fmod(f64)` is. We
+ // should promote the value to 64 bits to solve this. This is what the CRT
+ // headers do: `fmodf` is an inline header function that casts to f64 and
+ // calls `fmod`.
+ if (Subtarget.is32Bit() && Subtarget.isTargetKnownWindowsMSVC())
+ for (ISD::NodeType Op :
+ {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
+ ISD::FLOG10, ISD::FPOW, ISD::FSIN})
+ if (isOperationExpand(Op, MVT::f32))
+ setOperationAction(Op, MVT::f32, Promote);
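+ // As a rough sketch (not the actual CRT header), the inline definition is
+ // along the lines of:
+ //   static inline float fmodf(float X, float Y) {
+ //     return (float)fmod((double)X, (double)Y);
+ //   }
+ // so promoting the f32 operation to f64 in the backend matches what the
+ // source-level code would have done anyway.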
+
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
@@ -1827,13 +1659,12 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine(ISD::SETCC);
- setTargetDAGCombine(ISD::BUILD_VECTOR);
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::XOR);
setTargetDAGCombine(ISD::MSCATTER);
setTargetDAGCombine(ISD::MGATHER);
- computeRegisterProperties(Subtarget->getRegisterInfo());
+ computeRegisterProperties(Subtarget.getRegisterInfo());
MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
MaxStoresPerMemsetOptSize = 8;
@@ -1843,9 +1674,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
MaxStoresPerMemmoveOptSize = 4;
setPrefLoopAlignment(4); // 2^4 bytes.
- // A predictable cmov does not hurt on an in-order CPU.
- // FIXME: Use a CPU attribute to trigger this, not a CPU model.
- PredictableSelectIsExpensive = !Subtarget->isAtom();
+ // An out-of-order CPU can speculatively execute past a predictable branch,
+ // but a conditional move could be stalled by an expensive earlier operation.
+ PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
EnableExtLdPromotion = true;
setPrefFunctionAlignment(4); // 2^4 bytes.
@@ -1854,7 +1685,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {
- return Subtarget->isTargetMachO() && Subtarget->is64Bit();
+ return Subtarget.isTargetMachO() && Subtarget.is64Bit();
}
TargetLoweringBase::LegalizeTypeAction
@@ -1867,24 +1698,25 @@ X86TargetLowering::getPreferredVectorAction(EVT VT) const {
return TargetLoweringBase::getPreferredVectorAction(VT);
}
-EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
+EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
+ LLVMContext& Context,
EVT VT) const {
if (!VT.isVector())
- return Subtarget->hasAVX512() ? MVT::i1: MVT::i8;
+ return Subtarget.hasAVX512() ? MVT::i1: MVT::i8;
if (VT.isSimple()) {
MVT VVT = VT.getSimpleVT();
const unsigned NumElts = VVT.getVectorNumElements();
- const MVT EltVT = VVT.getVectorElementType();
+ MVT EltVT = VVT.getVectorElementType();
if (VVT.is512BitVector()) {
- if (Subtarget->hasAVX512())
+ if (Subtarget.hasAVX512())
if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
EltVT == MVT::f32 || EltVT == MVT::f64)
switch(NumElts) {
case 8: return MVT::v8i1;
case 16: return MVT::v16i1;
}
- if (Subtarget->hasBWI())
+ if (Subtarget.hasBWI())
if (EltVT == MVT::i8 || EltVT == MVT::i16)
switch(NumElts) {
case 32: return MVT::v32i1;
@@ -1892,23 +1724,20 @@ EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
}
}
- if (VVT.is256BitVector() || VVT.is128BitVector()) {
- if (Subtarget->hasVLX())
- if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
- EltVT == MVT::f32 || EltVT == MVT::f64)
- switch(NumElts) {
- case 2: return MVT::v2i1;
- case 4: return MVT::v4i1;
- case 8: return MVT::v8i1;
- }
- if (Subtarget->hasBWI() && Subtarget->hasVLX())
- if (EltVT == MVT::i8 || EltVT == MVT::i16)
- switch(NumElts) {
- case 8: return MVT::v8i1;
- case 16: return MVT::v16i1;
- case 32: return MVT::v32i1;
- }
+ if (Subtarget.hasBWI() && Subtarget.hasVLX())
+ return MVT::getVectorVT(MVT::i1, NumElts);
+
+ if (!isTypeLegal(VT) && getTypeAction(Context, VT) == TypePromoteInteger) {
+ EVT LegalVT = getTypeToTransformTo(Context, VT);
+ EltVT = LegalVT.getVectorElementType().getSimpleVT();
}
+
+ if (Subtarget.hasVLX() && EltVT.getSizeInBits() >= 32)
+ switch(NumElts) {
+ case 2: return MVT::v2i1;
+ case 4: return MVT::v4i1;
+ case 8: return MVT::v8i1;
+ }
}
return VT.changeVectorElementTypeToInteger();
@@ -1945,7 +1774,7 @@ static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
const DataLayout &DL) const {
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
// Max of 8 and alignment of type.
unsigned TyAlign = DL.getABITypeAlignment(Ty);
if (TyAlign > 8)
@@ -1954,7 +1783,7 @@ unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
}
unsigned Align = 4;
- if (Subtarget->hasSSE1())
+ if (Subtarget.hasSSE1())
getMaxByValAlign(Ty, Align);
return Align;
}
@@ -1977,35 +1806,40 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
bool MemcpyStrSrc,
MachineFunction &MF) const {
const Function *F = MF.getFunction();
- if ((!IsMemset || ZeroMemset) &&
- !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ if (!F->hasFnAttribute(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
- (!Subtarget->isUnalignedMem16Slow() ||
+ (!Subtarget.isUnalignedMem16Slow() ||
((DstAlign == 0 || DstAlign >= 16) &&
(SrcAlign == 0 || SrcAlign >= 16)))) {
- if (Size >= 32) {
- // FIXME: Check if unaligned 32-byte accesses are slow.
- if (Subtarget->hasInt256())
- return MVT::v8i32;
- if (Subtarget->hasFp256())
- return MVT::v8f32;
+ // FIXME: Check if unaligned 32-byte accesses are slow.
+ if (Size >= 32 && Subtarget.hasAVX()) {
+ // Although this isn't a well-supported type for AVX1, we'll let
+ // legalization and shuffle lowering produce the optimal codegen. If we
+ // choose an optimal type with a vector element larger than a byte,
+ // getMemsetStores() may create an intermediate splat (using an integer
+ // multiply) before we splat as a vector.
+ return MVT::v32i8;
}
- if (Subtarget->hasSSE2())
- return MVT::v4i32;
- if (Subtarget->hasSSE1())
+ if (Subtarget.hasSSE2())
+ return MVT::v16i8;
+ // TODO: Can SSE1 handle a byte vector?
+ if (Subtarget.hasSSE1())
return MVT::v4f32;
- } else if (!MemcpyStrSrc && Size >= 8 &&
- !Subtarget->is64Bit() &&
- Subtarget->hasSSE2()) {
+ } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
+ !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
// Do not use f64 to lower memcpy if source is string constant. It's
// better to use i32 to avoid the loads.
+ // Also, do not use f64 to lower memset unless this is a memset of zeros.
+ // The gymnastics of splatting a byte value into an XMM register and then
+ // only using 8-byte stores (because this is a CPU with slow unaligned
+ // 16-byte accesses) makes that a loser.
return MVT::f64;
}
}
// This is a compromise. If we reach here, unaligned accesses may be slow on
// this target. However, creating smaller, aligned accesses could be even
// slower and would certainly be a lot more code.
- if (Subtarget->is64Bit() && Size >= 8)
+ if (Subtarget.is64Bit() && Size >= 8)
return MVT::i64;
return MVT::i32;
}
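// A rough summary of the choices above, assuming NoImplicitFloat is not set
// and the size/alignment checks pass:
//   Size >= 32, AVX          -> v32i8
//   Size >= 16, SSE2         -> v16i8
//   Size >= 16, SSE1 only    -> v4f32
//   Size >= 8, 32-bit, SSE2  -> f64 (non-string memcpy or zero memset)
//   otherwise                -> i64 if 64-bit and Size >= 8, else i32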
@@ -2030,10 +1864,10 @@ X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
*Fast = true;
break;
case 128:
- *Fast = !Subtarget->isUnalignedMem16Slow();
+ *Fast = !Subtarget.isUnalignedMem16Slow();
break;
case 256:
- *Fast = !Subtarget->isUnalignedMem32Slow();
+ *Fast = !Subtarget.isUnalignedMem32Slow();
break;
// TODO: What about AVX-512 (512-bit) accesses?
}
@@ -2048,8 +1882,7 @@ X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
unsigned X86TargetLowering::getJumpTableEncoding() const {
// In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
// symbol.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT())
+ if (isPositionIndependent() && Subtarget.isPICStyleGOT())
return MachineJumpTableInfo::EK_Custom32;
// Otherwise, use the normal jump table encoding heuristics.
@@ -2057,15 +1890,14 @@ unsigned X86TargetLowering::getJumpTableEncoding() const {
}
bool X86TargetLowering::useSoftFloat() const {
- return Subtarget->useSoftFloat();
+ return Subtarget.useSoftFloat();
}
const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid,MCContext &Ctx) const{
- assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT());
+ assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
// In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
// entries.
return MCSymbolRefExpr::create(MBB->getSymbol(),
@@ -2075,7 +1907,7 @@ X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const {
- if (!Subtarget->is64Bit())
+ if (!Subtarget.is64Bit())
// This doesn't have SDLoc associated with it, but is not really the
// same as a Register.
return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
@@ -2089,7 +1921,7 @@ const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
MCContext &Ctx) const {
// X86-64 uses RIP relative addressing based on the jump table label.
- if (Subtarget->isPICStyleRIPRel())
+ if (Subtarget.isPICStyleRIPRel())
return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
// Otherwise, the reference is relative to the PIC base.
@@ -2105,7 +1937,7 @@ X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
default:
return TargetLowering::findRepresentativeClass(TRI, VT);
case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
- RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
+ RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
break;
case MVT::x86mmx:
RRC = &X86::VR64RegClass;
@@ -2121,47 +1953,76 @@ X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
return std::make_pair(RRC, Cost);
}
-bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
- unsigned &Offset) const {
- if (!Subtarget->isTargetLinux())
- return false;
+unsigned X86TargetLowering::getAddressSpace() const {
+ if (Subtarget.is64Bit())
+ return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
+ return 256;
+}
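+
+// For reference: in LLVM's x86 conventions, address space 256 maps to %gs
+// and 257 maps to %fs, so the value returned above selects the segment
+// register used for the TLS slots below.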
- if (Subtarget->is64Bit()) {
- // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
- Offset = 0x28;
- if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
- AddressSpace = 256;
- else
- AddressSpace = 257;
- } else {
- // %gs:0x14 on i386
- Offset = 0x14;
- AddressSpace = 256;
+Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
+ // glibc has a special slot for the stack guard in tcbhead_t; use it instead
+ // of the usual global variable (see sysdeps/{i386,x86_64}/nptl/tls.h).
+ if (!Subtarget.isTargetGlibc())
+ return TargetLowering::getIRStackGuard(IRB);
+
+ // %fs:0x28 on x86-64 (%gs:0x28 under the Kernel code model); %gs:0x14 on
+ // i386.
+ unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
+ unsigned AddressSpace = getAddressSpace();
+ return ConstantExpr::getIntToPtr(
+ ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
+ Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
+}
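+
+ // On x86-64 glibc, the constant built above amounts to IR of the form
+ // (illustrative sketch):
+ //   inttoptr (i32 40 to i8* addrspace(257)*)
+ // i.e. a pointer into %fs at offset 0x28, from which the stack protector
+ // loads the guard value.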
+
+void X86TargetLowering::insertSSPDeclarations(Module &M) const {
+ // The MSVC CRT provides functionality for stack protection.
+ if (Subtarget.getTargetTriple().isOSMSVCRT()) {
+ // MSVC CRT has a global variable holding security cookie.
+ M.getOrInsertGlobal("__security_cookie",
+ Type::getInt8PtrTy(M.getContext()));
+
+ // MSVC CRT has a function to validate security cookie.
+ auto *SecurityCheckCookie = cast<Function>(
+ M.getOrInsertFunction("__security_check_cookie",
+ Type::getVoidTy(M.getContext()),
+ Type::getInt8PtrTy(M.getContext()), nullptr));
+ SecurityCheckCookie->setCallingConv(CallingConv::X86_FastCall);
+ SecurityCheckCookie->addAttribute(1, Attribute::AttrKind::InReg);
+ return;
}
- return true;
+ // glibc has a special slot for the stack guard.
+ if (Subtarget.isTargetGlibc())
+ return;
+ TargetLowering::insertSSPDeclarations(M);
+}
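+
+ // The net effect of the declarations above is IR along the lines of
+ // (sketch):
+ //   @__security_cookie = external global i8*
+ //   declare x86_fastcallcc void @__security_check_cookie(i8* inreg)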
+
+Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
+ // MSVC CRT has a global variable holding security cookie.
+ if (Subtarget.getTargetTriple().isOSMSVCRT())
+ return M.getGlobalVariable("__security_cookie");
+ return TargetLowering::getSDagStackGuard(M);
+}
+
+Value *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
+ // MSVC CRT has a function to validate security cookie.
+ if (Subtarget.getTargetTriple().isOSMSVCRT())
+ return M.getFunction("__security_check_cookie");
+ return TargetLowering::getSSPStackGuardCheck(M);
}
Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
- if (!Subtarget->isTargetAndroid())
+ if (!Subtarget.isTargetAndroid())
return TargetLowering::getSafeStackPointerLocation(IRB);
// Android provides a fixed TLS slot for the SafeStack pointer. See the
// definition of TLS_SLOT_SAFESTACK in
// https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
unsigned AddressSpace, Offset;
- if (Subtarget->is64Bit()) {
- // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
- Offset = 0x48;
- if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
- AddressSpace = 256;
- else
- AddressSpace = 257;
- } else {
- // %gs:0x24 on i386
- Offset = 0x24;
- AddressSpace = 256;
- }
+ // %fs:0x48 on x86-64 (%gs:0x48 under the Kernel code model); %gs:0x24 on
+ // i386.
+ Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
+ AddressSpace = getAddressSpace();
return ConstantExpr::getIntToPtr(
ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
@@ -2194,11 +2055,11 @@ const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
}
SDValue
-X86TargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
@@ -2214,10 +2075,10 @@ X86TargetLowering::LowerReturn(SDValue Chain,
RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
// Operand #1 = Bytes To Pop
RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
- MVT::i16));
+ MVT::i32));
// Copy the result values into the output registers.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
SDValue ValToCopy = OutVals[i];
@@ -2244,14 +2105,14 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// or SSE or MMX vectors.
if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
- (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
+ (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
report_fatal_error("SSE register return with SSE disabled");
}
// Likewise we can't return F64 values with SSE1 only. gcc does so, but
// llvm-gcc has never done it right and no one has noticed, so this
// should be OK for now.
if (ValVT == MVT::f64 &&
- (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
+ (Subtarget.is64Bit() && !Subtarget.hasSSE2()))
report_fatal_error("SSE2 register return with SSE2 disabled");
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
@@ -2269,7 +2130,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
// which is returned in RAX / RDX.
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
if (ValVT == MVT::x86mmx) {
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
@@ -2277,7 +2138,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
ValToCopy);
// If we don't have SSE2 available, convert to v4f32 so the generated
// register is legal.
- if (!Subtarget->hasSSE2())
+ if (!Subtarget.hasSSE2())
ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
}
}
@@ -2288,6 +2149,9 @@ X86TargetLowering::LowerReturn(SDValue Chain,
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
+ // The Swift calling convention does not require that we copy the sret
+ // argument into %rax/%eax for the return, and SRetReturnReg is not set for
+ // Swift.
+
// All x86 ABIs require that for returning structs by value we copy
// the sret argument into %rax/%eax (depending on ABI) for the return.
// We saved the argument into a virtual register in the entry block,
@@ -2298,11 +2162,30 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// false, then an sret argument may be implicitly inserted in the SelDAG. In
// either case FuncInfo->setSRetReturnReg() will have been called.
if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
- SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg,
+ // When we have both sret and another return value, we should use the
+ // original Chain stored in RetOps[0], instead of the current Chain updated
+ // in the above loop. If we only have sret, RetOps[0] equals Chain.
+
+ // For the case of sret and another return value, we have
+ // Chain_0 at the function entry
+ // Chain_1 = getCopyToReg(Chain_0) in the above loop
+ // If we use Chain_1 in getCopyFromReg, we will have
+ // Val = getCopyFromReg(Chain_1)
+ // Chain_2 = getCopyToReg(Chain_1, Val) from below
+
+ // getCopyToReg(Chain_0) will be glued together with
+ // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
+ // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
+ // Data dependency from Unit B to Unit A due to usage of Val in
+ // getCopyToReg(Chain_1, Val)
+ // Chain dependency from Unit A to Unit B
+
+ // So here, we use RetOps[0] (i.e., Chain_0) for getCopyFromReg.
+ SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
getPointerTy(MF.getDataLayout()));
unsigned RetValReg
- = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
+ = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
X86::RAX : X86::EAX;
Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
Flag = Chain.getValue(1);
@@ -2312,7 +2195,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
}
- const X86RegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const MCPhysReg *I =
TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
if (I) {
@@ -2337,9 +2220,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
}
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
- if (N->getNumValues() != 1)
- return false;
- if (!N->hasNUsesOfValue(1, 0))
+ if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
return false;
SDValue TCChain = Chain;
@@ -2375,15 +2256,19 @@ bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
return true;
}
-EVT
-X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
- ISD::NodeType ExtendKind) const {
- MVT ReturnMVT;
- // TODO: Is this also valid on 32-bit?
- if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
+EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
+ ISD::NodeType ExtendKind) const {
+ MVT ReturnMVT = MVT::i32;
+
+ bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
+ if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
+ // The ABI does not require i1, i8 or i16 to be extended.
+ //
+ // On Darwin, there is code in the wild relying on Clang's old behaviour of
+ // always extending i8/i16 return values, so keep doing that for now.
+ // (PR26665).
ReturnMVT = MVT::i8;
- else
- ReturnMVT = MVT::i32;
+ }
EVT MinVT = getRegisterType(Context, ReturnMVT);
return VT.bitsLT(MinVT) ? MinVT : VT;
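// For example (sketch): an 'i8' return value keeps MVT::i8 here on
// non-Darwin targets, so callers cannot assume the upper bits of %eax are
// defined; on Darwin it is still widened to i32 to match Clang's historical
// behaviour (PR26665).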
@@ -2392,16 +2277,14 @@ X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
-SDValue
-X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue X86TargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
- bool Is64Bit = Subtarget->is64Bit();
+ bool Is64Bit = Subtarget.is64Bit();
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
@@ -2413,7 +2296,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// If this is x86-64, and we disabled SSE, we can't return FP values
if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
- ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
+ ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget.hasSSE1())) {
report_fatal_error("SSE register return with SSE disabled");
}
@@ -2422,6 +2305,8 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
bool RoundAfterCopy = false;
if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
isScalarFPTypeInSSEReg(VA.getValVT())) {
+ if (!Subtarget.hasX87())
+ report_fatal_error("X87 register return with X87 disabled");
CopyVT = MVT::f80;
RoundAfterCopy = (CopyVT != VA.getLocVT());
}
@@ -2492,10 +2377,9 @@ argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins, bool IsMCU) {
/// Make a copy of an aggregate at address specified by "Src" to address
/// "Dst" with size and alignment information specified by the specific
/// parameter attribute. The copy will be passed as a byval function parameter.
-static SDValue
-CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
- ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
- SDLoc dl) {
+static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
+ SDValue Chain, ISD::ArgFlagsTy Flags,
+ SelectionDAG &DAG, const SDLoc &dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
@@ -2549,13 +2433,11 @@ bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
}
SDValue
-X86TargetLowering::LowerMemArgument(SDValue Chain,
- CallingConv::ID CallConv,
+X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
const CCValAssign &VA,
- MachineFrameInfo *MFI,
- unsigned i) const {
+ MachineFrameInfo *MFI, unsigned i) const {
// Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = Ins[i].Flags;
bool AlwaysUseMutable = shouldGuaranteeTCO(
@@ -2602,6 +2484,14 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
} else {
int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), isImmutable);
+
+ // Set SExt or ZExt flag.
+ if (VA.getLocInfo() == CCValAssign::ZExt) {
+ MFI->setObjectZExt(FI, true);
+ } else if (VA.getLocInfo() == CCValAssign::SExt) {
+ MFI->setObjectSExt(FI, true);
+ }
+
// Adjust SP offset of interrupt parameter.
if (CallConv == CallingConv::X86_INTR) {
MFI->setObjectOffset(FI, Offset);
@@ -2610,8 +2500,7 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
SDValue Val = DAG.getLoad(
ValVT, dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
- false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
return ExtendedInMem ?
DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val) : Val;
}
@@ -2619,10 +2508,10 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
- const X86Subtarget *Subtarget) {
- assert(Subtarget->is64Bit());
+ const X86Subtarget &Subtarget) {
+ assert(Subtarget.is64Bit());
- if (Subtarget->isCallingConvWin64(CallConv)) {
+ if (Subtarget.isCallingConvWin64(CallConv)) {
static const MCPhysReg GPR64ArgRegsWin64[] = {
X86::RCX, X86::RDX, X86::R8, X86::R9
};
@@ -2638,9 +2527,9 @@ static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
CallingConv::ID CallConv,
- const X86Subtarget *Subtarget) {
- assert(Subtarget->is64Bit());
- if (Subtarget->isCallingConvWin64(CallConv)) {
+ const X86Subtarget &Subtarget) {
+ assert(Subtarget.is64Bit());
+ if (Subtarget.isCallingConvWin64(CallConv)) {
// The XMM registers which might contain var arg parameters are shadowed
// in their paired GPR. So we only need to save the GPR to their home
// slots.
@@ -2650,10 +2539,10 @@ static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
const Function *Fn = MF.getFunction();
bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
- bool isSoftFloat = Subtarget->useSoftFloat();
+ bool isSoftFloat = Subtarget.useSoftFloat();
assert(!(isSoftFloat && NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
- if (isSoftFloat || NoImplicitFloatOps || !Subtarget->hasSSE1())
+ if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
// Kernel mode asks for SSE to be disabled, so there are no XMM argument
// registers.
return None;
@@ -2667,21 +2556,21 @@ static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
SDValue X86TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
- const Function* Fn = MF.getFunction();
+ const Function *Fn = MF.getFunction();
if (Fn->hasExternalLinkage() &&
- Subtarget->isTargetCygMing() &&
+ Subtarget.isTargetCygMing() &&
Fn->getName() == "main")
FuncInfo->setForceFramePointer(true);
MachineFrameInfo *MFI = MF.getFrameInfo();
- bool Is64Bit = Subtarget->is64Bit();
- bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
+ bool Is64Bit = Subtarget.is64Bit();
+ bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling convention fastcc, ghc or hipe");
@@ -2778,13 +2667,18 @@ SDValue X86TargetLowering::LowerFormalArguments(
// If value is passed via pointer - do a load.
if (VA.getLocInfo() == CCValAssign::Indirect)
- ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
- MachinePointerInfo(), false, false, false, 0);
+ ArgValue =
+ DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
InVals.push_back(ArgValue);
}
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ // The Swift calling convention does not require that we copy the sret
+ // argument into %rax/%eax for the return. We don't set SRetReturnReg for
+ // Swift.
+ if (CallConv == CallingConv::Swift)
+ continue;
+
// All x86 ABIs require that for returning structs by value we copy the
// sret argument into %rax/%eax (depending on ABI) for the return. Save
// the argument into a virtual register so that we can access it from the
@@ -2819,7 +2713,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
}
// Figure out if XMM registers are in use.
- assert(!(Subtarget->useSoftFloat() &&
+ assert(!(Subtarget.useSoftFloat() &&
Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
"SSE register cannot be used when SSE is disabled!");
@@ -2831,7 +2725,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
- assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
+ assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
// Gather all the live in physical registers.
@@ -2865,7 +2759,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
} else {
// For X86-64, if there are vararg parameters that are passed via
// registers, then we must store them to their spots on the stack so
- // they may be loaded by deferencing the result of va_next.
+ // they may be loaded by dereferencing the result of va_next.
FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
@@ -2884,8 +2778,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
DAG.getStore(Val.getValue(1), dl, Val, FIN,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(),
- FuncInfo->getRegSaveFrameIndex(), Offset),
- false, false, 0);
+ FuncInfo->getRegSaveFrameIndex(), Offset));
MemOps.push_back(Store);
Offset += 8;
}
@@ -2913,13 +2806,13 @@ SDValue X86TargetLowering::LowerFormalArguments(
// Find the largest legal vector type.
MVT VecVT = MVT::Other;
// FIXME: Only some x86_32 calling conventions support AVX512.
- if (Subtarget->hasAVX512() &&
+ if (Subtarget.hasAVX512() &&
(Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
CallConv == CallingConv::Intel_OCL_BI)))
VecVT = MVT::v16f32;
- else if (Subtarget->hasAVX())
+ else if (Subtarget.hasAVX())
VecVT = MVT::v8f32;
- else if (Subtarget->hasSSE2())
+ else if (Subtarget.hasSSE2())
VecVT = MVT::v4f32;
// We forward some GPRs and some vector types.
@@ -2960,8 +2853,8 @@ SDValue X86TargetLowering::LowerFormalArguments(
FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
- !Subtarget->getTargetTriple().isOSMSVCRT() &&
- argsAreStructReturn(Ins, Subtarget->isTargetMCU()) == StackStructReturn)
+ !Subtarget.getTargetTriple().isOSMSVCRT() &&
+ argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
FuncInfo->setBytesToPopOnReturn(4);
}
@@ -2987,7 +2880,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
// offset from the bottom of this and each funclet's frame must be the
// same, so the size of funclets' (mostly empty) frames is dictated by
// how far this slot is from the bottom (since they allocate just enough
- // space to accomodate holding this slot at the correct offset).
+ // space to accommodate holding this slot at the correct offset).
int PSPSymFI = MFI->CreateStackObject(8, 8, /*isSS=*/false);
EHInfo->PSPSymFrameIdx = PSPSymFI;
}
@@ -2996,12 +2889,11 @@ SDValue X86TargetLowering::LowerFormalArguments(
return Chain;
}
-SDValue
-X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
- SDValue StackPtr, SDValue Arg,
- SDLoc dl, SelectionDAG &DAG,
- const CCValAssign &VA,
- ISD::ArgFlagsTy Flags) const {
+SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
+ SDValue Arg, const SDLoc &dl,
+ SelectionDAG &DAG,
+ const CCValAssign &VA,
+ ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
@@ -3011,24 +2903,20 @@ X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
return DAG.getStore(
Chain, dl, Arg, PtrOff,
- MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
- false, false, 0);
+ MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}
/// Emit a load of return address if tail call
/// optimization is performed and it is required.
-SDValue
-X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
- SDValue &OutRetAddr, SDValue Chain,
- bool IsTailCall, bool Is64Bit,
- int FPDiff, SDLoc dl) const {
+SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
+ SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
+ bool Is64Bit, int FPDiff, const SDLoc &dl) const {
// Adjust the Return address stack slot.
EVT VT = getPointerTy(DAG.getDataLayout());
OutRetAddr = getReturnAddressFrameIndex(DAG);
// Load the "old" Return address.
- OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
- false, false, false, 0);
+ OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
return SDValue(OutRetAddr.getNode(), 1);
}
@@ -3037,7 +2925,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
SDValue Chain, SDValue RetAddrFrIdx,
EVT PtrVT, unsigned SlotSize,
- int FPDiff, SDLoc dl) {
+ int FPDiff, const SDLoc &dl) {
// Store the return address to the appropriate stack slot.
if (!FPDiff) return Chain;
// Calculate the new stack slot for the return address.
@@ -3047,21 +2935,20 @@ static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
MachinePointerInfo::getFixedStack(
- DAG.getMachineFunction(), NewReturnAddrFI),
- false, false, 0);
+ DAG.getMachineFunction(), NewReturnAddrFI));
return Chain;
}
/// Returns a vector_shuffle mask for an movs{s|d}, movd
/// operation of specified width.
-static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
+static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> Mask;
Mask.push_back(NumElems);
for (unsigned i = 1; i != NumElems; ++i)
Mask.push_back(i);
- return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
+ return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
SDValue
@@ -3079,9 +2966,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool isVarArg = CLI.IsVarArg;
MachineFunction &MF = DAG.getMachineFunction();
- bool Is64Bit = Subtarget->is64Bit();
- bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
- StructReturnType SR = callIsStructReturn(Outs, Subtarget->isTargetMCU());
+ bool Is64Bit = Subtarget.is64Bit();
+ bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
+ StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
bool IsSibcall = false;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
@@ -3092,7 +2979,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (Attr.getValueAsString() == "true")
isTailCall = false;
- if (Subtarget->isPICStyleGOT() &&
+ if (Subtarget.isPICStyleGOT() &&
!MF.getTarget().Options.GuaranteedTailCallOpt) {
// If we are using a GOT, disable tail calls to external symbols with
// default visibility. Tail calling such a symbol requires using a GOT
@@ -3195,7 +3082,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Walk the register/memloc assignments, inserting copies/loads. In the case
// of tail call optimization arguments are handle later.
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
// Skip inalloca arguments, they have already been written.
ISD::ArgFlagsTy Flags = Outs[i].Flags;
@@ -3238,8 +3125,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
Chain = DAG.getStore(
Chain, dl, Arg, SpillSlot,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
Arg = SpillSlot;
break;
}
@@ -3273,7 +3159,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
- if (Subtarget->isPICStyleGOT()) {
+ if (Subtarget.isPICStyleGOT()) {
// ELF / PIC requires GOT in the EBX register before function calls via PLT
// GOT pointer.
if (!isTailCall) {
@@ -3314,7 +3200,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
- assert((Subtarget->hasSSE1() || !NumXMMRegs)
+ assert((Subtarget.hasSSE1() || !NumXMMRegs)
&& "SSE registers cannot be used when SSE is disabled");
RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
@@ -3377,8 +3263,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Store relative to framepointer.
MemOpChains2.push_back(DAG.getStore(
ArgChain, dl, Arg, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- false, false, 0));
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
}
}
@@ -3416,70 +3301,29 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// non-JIT mode.
const GlobalValue *GV = G->getGlobal();
if (!GV->hasDLLImportStorageClass()) {
- unsigned char OpFlags = 0;
- bool ExtraLoad = false;
- unsigned WrapperKind = ISD::DELETED_NODE;
-
- // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
- // external symbols most go through the PLT in PIC mode. If the symbol
- // has hidden or protected visibility, or if it is static or local, then
- // we don't need to use the PLT - we can directly call it.
- if (Subtarget->isTargetELF() &&
- DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
- GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
- OpFlags = X86II::MO_PLT;
- } else if (Subtarget->isPICStyleStubAny() &&
- !GV->isStrongDefinitionForLinker() &&
- (!Subtarget->getTargetTriple().isMacOSX() ||
- Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
- // PC-relative references to external symbols should go through $stub,
- // unless we're building with the leopard linker or later, which
- // automatically synthesizes these stubs.
- OpFlags = X86II::MO_DARWIN_STUB;
- } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
- cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
- // If the function is marked as non-lazy, generate an indirect call
- // which loads from the GOT directly. This avoids runtime overhead
- // at the cost of eager binding (and one extra byte of encoding).
- OpFlags = X86II::MO_GOTPCREL;
- WrapperKind = X86ISD::WrapperRIP;
- ExtraLoad = true;
- }
+ unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
Callee = DAG.getTargetGlobalAddress(
GV, dl, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
- // Add a wrapper if needed.
- if (WrapperKind != ISD::DELETED_NODE)
+ if (OpFlags == X86II::MO_GOTPCREL) {
+ // Add a wrapper.
Callee = DAG.getNode(X86ISD::WrapperRIP, dl,
- getPointerTy(DAG.getDataLayout()), Callee);
- // Add extra indirection if needed.
- if (ExtraLoad)
+ getPointerTy(DAG.getDataLayout()), Callee);
+ // Add extra indirection
Callee = DAG.getLoad(
getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()), false, false,
- false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
+ }
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- unsigned char OpFlags = 0;
-
- // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
- // external symbols should go through the PLT.
- if (Subtarget->isTargetELF() &&
- DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
- OpFlags = X86II::MO_PLT;
- } else if (Subtarget->isPICStyleStubAny() &&
- (!Subtarget->getTargetTriple().isMacOSX() ||
- Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
- // PC-relative references to external symbols should go through $stub,
- // unless we're building with the leopard linker or later, which
- // automatically synthesizes these stubs.
- OpFlags = X86II::MO_DARWIN_STUB;
- }
+ const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ unsigned char OpFlags =
+ Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
Callee = DAG.getTargetExternalSymbol(
S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
- } else if (Subtarget->isTarget64BitILP32() &&
+ } else if (Subtarget.isTarget64BitILP32() &&
Callee->getValueType(0) == MVT::i32) {
// Zero-extend the 32-bit Callee address into a 64-bit according to x32 ABI
Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
@@ -3552,7 +3396,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getTarget().Options.GuaranteedTailCallOpt))
NumBytesForCalleeToPop = NumBytes; // Callee pops everything
else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
- !Subtarget->getTargetTriple().isOSMSVCRT() &&
+ !Subtarget.getTargetTriple().isOSMSVCRT() &&
SR == StackStructReturn)
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
@@ -3562,6 +3406,12 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
else
NumBytesForCalleeToPop = 0; // Callee pops nothing.
+ if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
+ // No need to reset the stack after the call if the call doesn't return. To
+ // keep the machine verifier happy, we'll pretend the callee does it for us.
+ NumBytesForCalleeToPop = NumBytes;
+ }
+
// Returns a flag for retval copy to use.
if (!IsSibcall) {
Chain = DAG.getCALLSEQ_END(Chain,
@@ -3614,8 +3464,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG& DAG) const {
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
uint64_t AlignMask = StackAlignment - 1;
int64_t Offset = StackSize;
@@ -3636,8 +3486,28 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
- const X86InstrInfo *TII) {
+ const X86InstrInfo *TII, const CCValAssign &VA) {
unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
+
+ for (;;) {
+ // Look through nodes that don't alter the bits of the incoming value.
+ unsigned Op = Arg.getOpcode();
+ if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
+ Arg = Arg.getOperand(0);
+ continue;
+ }
+ if (Op == ISD::TRUNCATE) {
+ const SDValue &TruncInput = Arg.getOperand(0);
+ if (TruncInput.getOpcode() == ISD::AssertZext &&
+ cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
+ Arg.getValueType()) {
+ Arg = TruncInput.getOperand(0);
+ continue;
+ }
+ }
+ break;
+ }
+
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
@@ -3647,7 +3517,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
if (!Def)
return false;
if (!Flags.isByVal()) {
- if (!TII->isLoadFromStackSlot(Def, FI))
+ if (!TII->isLoadFromStackSlot(*Def, FI))
return false;
} else {
unsigned Opcode = Def->getOpcode();
@@ -3682,7 +3552,20 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
assert(FI != INT_MAX);
if (!MFI->isFixedObjectIndex(FI))
return false;
- return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
+
+ if (Offset != MFI->getObjectOffset(FI))
+ return false;
+
+ if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
+ // If the argument location is wider than the argument type, check that any
+ // extension flags match.
+ if (Flags.isZExt() != MFI->isObjectZExt(FI) ||
+ Flags.isSExt() != MFI->isObjectSExt(FI)) {
+ return false;
+ }
+ }
+
+ return Bytes == MFI->getObjectSize(FI);
}
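// An illustrative case for the look-through above (a sketch): after type
// legalization an i8 stack argument may reach the tail call as
//   (trunc (AssertZext (CopyFromReg ...), i8))
// Peeling off the truncate and AssertZext exposes the CopyFromReg, so the
// value can still be matched back to the caller's fixed stack slot.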
/// Check whether the call is eligible for tail call optimization. Targets
@@ -3708,8 +3591,8 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
CallingConv::ID CallerCC = CallerF->getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
- bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
- bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
+ bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
+ bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
// Win64 functions have extra shadow space for argument homing. Don't do the
// sibcall if the caller and callee have mismatched expectations for this
@@ -3728,7 +3611,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
// emit a special epilogue.
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
if (RegInfo->needsStackRealignment(MF))
return false;
@@ -3739,6 +3622,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
// Do not sibcall optimize vararg calls unless all arguments are passed via
// registers.
+ LLVMContext &C = *DAG.getContext();
if (isVarArg && !Outs.empty()) {
// Optimizing for varargs on Win64 is unlikely to be safe without
// additional testing.
@@ -3746,8 +3630,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
return false;
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
- *DAG.getContext());
+ CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
@@ -3767,8 +3650,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
}
if (Unused) {
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
- *DAG.getContext());
+ CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
@@ -3777,34 +3659,17 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
}
}
- // If the calling conventions do not match, then we'd better make sure the
- // results are returned in the same way as what the caller expects.
+ // Check that the call results are passed in the same way.
+ if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
+ RetCC_X86, RetCC_X86))
+ return false;
+ // The callee has to preserve all registers the caller needs to preserve.
+ const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
if (!CCMatch) {
- SmallVector<CCValAssign, 16> RVLocs1;
- CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
- *DAG.getContext());
- CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
-
- SmallVector<CCValAssign, 16> RVLocs2;
- CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
- *DAG.getContext());
- CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
-
- if (RVLocs1.size() != RVLocs2.size())
+ const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
+ if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
return false;
- for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
- if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
- return false;
- if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
- return false;
- if (RVLocs1[i].isRegLoc()) {
- if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
- return false;
- } else {
- if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
- return false;
- }
- }
}
unsigned StackArgsSize = 0;
@@ -3815,8 +3680,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
// Check if stack adjustment is needed. For now, do not do this if any
// argument is passed on the stack.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
- *DAG.getContext());
+ CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
// Allocate shadow area for Win64
if (IsCalleeWin64)
@@ -3830,7 +3694,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
// the caller's fixed stack objects.
MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
- const X86InstrInfo *TII = Subtarget->getInstrInfo();
+ const X86InstrInfo *TII = Subtarget.getInstrInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
@@ -3839,26 +3703,25 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
return false;
if (!VA.isRegLoc()) {
if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
- MFI, MRI, TII))
+ MFI, MRI, TII, VA))
return false;
}
}
}
+ bool PositionIndependent = isPositionIndependent();
// If the tailcall address may be in a register, then make sure it's
// possible to register allocate for it. In 32-bit, the call address can
// only target EAX, EDX, or ECX since the tail call must be scheduled after
// callee-saved registers are restored. These happen to be the same
// registers used to pass 'inreg' arguments so watch out for those.
- if (!Subtarget->is64Bit() &&
- ((!isa<GlobalAddressSDNode>(Callee) &&
- !isa<ExternalSymbolSDNode>(Callee)) ||
- DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
+ if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
+ !isa<ExternalSymbolSDNode>(Callee)) ||
+ PositionIndependent)) {
unsigned NumInRegs = 0;
// In PIC we need an extra register to formulate the address computation
// for the callee.
- unsigned MaxInRegs =
- (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
+ unsigned MaxInRegs = PositionIndependent ? 2 : 3;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
@@ -3874,10 +3737,14 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
}
}
}
+
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
+ return false;
}
bool CalleeWillPop =
- X86::isCalleePop(CalleeCC, Subtarget->is64Bit(), isVarArg,
+ X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
MF.getTarget().Options.GuaranteedTailCallOpt);
if (unsigned BytesToPop =
@@ -3923,6 +3790,8 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::SHUFP:
case X86ISD::INSERTPS:
case X86ISD::PALIGNR:
+ case X86ISD::VSHLDQ:
+ case X86ISD::VSRLDQ:
case X86ISD::MOVLHPS:
case X86ISD::MOVLHPD:
case X86ISD::MOVHLPS:
@@ -3935,16 +3804,30 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::MOVSD:
case X86ISD::UNPCKL:
case X86ISD::UNPCKH:
+ case X86ISD::VBROADCAST:
case X86ISD::VPERMILPI:
+ case X86ISD::VPERMILPV:
case X86ISD::VPERM2X128:
+ case X86ISD::VPERMIL2:
case X86ISD::VPERMI:
+ case X86ISD::VPPERM:
case X86ISD::VPERMV:
case X86ISD::VPERMV3:
+ case X86ISD::VZEXT_MOVL:
return true;
}
}
-static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
+static bool isTargetShuffleVariableMask(unsigned Opcode) {
+ switch (Opcode) {
+ default: return false;
+ case X86ISD::PSHUFB:
+ case X86ISD::VPERMILPV:
+ return true;
+ }
+}
+
+static SDValue getTargetShuffleNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue V1, unsigned TargetMask,
SelectionDAG &DAG) {
switch(Opc) {
@@ -3959,7 +3842,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
}
}
-static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
+static SDValue getTargetShuffleNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue V1, SDValue V2, SelectionDAG &DAG) {
switch(Opc) {
default: llvm_unreachable("Unknown x86 shuffle node");
@@ -3978,7 +3861,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
int ReturnAddrIndex = FuncInfo->getRAIndex();
@@ -4047,17 +3930,20 @@ bool X86::isCalleePop(CallingConv::ID CallingConv,
/// \brief Return true if the condition is an unsigned comparison operation.
static bool isX86CCUnsigned(unsigned X86CC) {
switch (X86CC) {
- default: llvm_unreachable("Invalid integer condition!");
- case X86::COND_E: return true;
- case X86::COND_G: return false;
- case X86::COND_GE: return false;
- case X86::COND_L: return false;
- case X86::COND_LE: return false;
- case X86::COND_NE: return true;
- case X86::COND_B: return true;
- case X86::COND_A: return true;
- case X86::COND_BE: return true;
- case X86::COND_AE: return true;
+ default:
+ llvm_unreachable("Invalid integer condition!");
+ case X86::COND_E:
+ case X86::COND_NE:
+ case X86::COND_B:
+ case X86::COND_A:
+ case X86::COND_BE:
+ case X86::COND_AE:
+ return true;
+ case X86::COND_G:
+ case X86::COND_GE:
+ case X86::COND_L:
+ case X86::COND_LE:
+ return false;
}
}
@@ -4080,8 +3966,9 @@ static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
/// Do a one-to-one translation of a ISD::CondCode to the X86-specific
/// condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
-static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, SDLoc DL, bool isFP,
- SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
+static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
+ bool isFP, SDValue &LHS, SDValue &RHS,
+ SelectionDAG &DAG) {
if (!isFP) {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
@@ -4181,24 +4068,50 @@ bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
if (!IntrData)
return false;
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.readMem = false;
+ Info.writeMem = false;
+ Info.vol = false;
+ Info.offset = 0;
+
switch (IntrData->Type) {
- case LOADA:
- case LOADU: {
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(I.getType());
+ case EXPAND_FROM_MEM: {
Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.align = (IntrData->Type == LOADA ? Info.memVT.getSizeInBits()/8 : 1);
- Info.vol = false;
+ Info.memVT = MVT::getVT(I.getType());
+ Info.align = 1;
Info.readMem = true;
- Info.writeMem = false;
- return true;
+ break;
}
- default:
+ case COMPRESS_TO_MEM: {
+ Info.ptrVal = I.getArgOperand(0);
+ Info.memVT = MVT::getVT(I.getArgOperand(1)->getType());
+ Info.align = 1;
+ Info.writeMem = true;
break;
}
+ case TRUNCATE_TO_MEM_VI8:
+ case TRUNCATE_TO_MEM_VI16:
+ case TRUNCATE_TO_MEM_VI32: {
+ Info.ptrVal = I.getArgOperand(0);
+ MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
+ MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
+ if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
+ ScalarVT = MVT::i8;
+ else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
+ ScalarVT = MVT::i16;
+ else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
+ ScalarVT = MVT::i32;
+
+ Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
+ Info.align = 1;
+ Info.writeMem = true;
+ break;
+ }
+ default:
+ return false;
+ }
- return false;
+ return true;
}
/// Returns true if the target can instruction select the
@@ -4246,12 +4159,24 @@ bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
bool X86TargetLowering::isCheapToSpeculateCttz() const {
// Speculate cttz only if we can directly use TZCNT.
- return Subtarget->hasBMI();
+ return Subtarget.hasBMI();
}
bool X86TargetLowering::isCheapToSpeculateCtlz() const {
// Speculate ctlz only if we can directly use LZCNT.
- return Subtarget->hasLZCNT();
+ return Subtarget.hasLZCNT();
+}
+
+bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
+ if (!Subtarget.hasBMI())
+ return false;
+
+ // There are only 32-bit and 64-bit forms for 'andn'.
+ EVT VT = Y.getValueType();
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return false;
+
+ return true;
}
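For context on hasAndNotCompare: BMI's ANDN computes ~x & y in one instruction, but only in 32- and 64-bit forms, which is what the type check above enforces. A minimal source-level example of the pattern it is meant to make cheap to speculate (my example, not taken from the patch):

#include <cstdint>

// On a BMI-capable target this comparison can typically be selected as a
// single ANDN that also produces the flags for the branch/setcc.
bool anyBitsSetOutsideMask(uint64_t Mask, uint64_t Value) {
  return (~Mask & Value) != 0;
}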
/// Return true if every element in Mask, beginning
@@ -4269,11 +4194,26 @@ static bool isUndefOrInRange(int Val, int Low, int Hi) {
return (Val < 0) || (Val >= Low && Val < Hi);
}
+/// Return true if every element in Mask is undef or if its value
+/// falls within the specified range [Low, Hi).
+static bool isUndefOrInRange(ArrayRef<int> Mask,
+ int Low, int Hi) {
+ for (int M : Mask)
+ if (!isUndefOrInRange(M, Low, Hi))
+ return false;
+ return true;
+}
+
/// Val is either less than zero (undef) or equal to the specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
return (Val < 0 || Val == CmpVal);
}
+/// Val is either the undef or zero sentinel value.
+static bool isUndefOrZero(int Val) {
+ return (Val == SM_SentinelUndef || Val == SM_SentinelZero);
+}
+
/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size, falls within the specified
/// sequential range [Low, Low+Size), or is undef.
@@ -4285,6 +4225,17 @@ static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
return true;
}
+/// Return true if every element in Mask, beginning
+/// from position Pos and ending in Pos+Size, falls within the specified
+/// sequential range [Low, Low+Size), or is undef or is zero.
+static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
+ unsigned Size, int Low) {
+ for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
+ if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
+ return false;
+ return true;
+}
+
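A standalone sketch of the new sentinel-aware helpers. The sentinel values below are assumptions for illustration; the real SM_SentinelUndef/SM_SentinelZero come from X86ShuffleDecode.

#include <vector>

constexpr int kSentinelUndef = -1; // assumed value, for illustration only
constexpr int kSentinelZero  = -2; // assumed value, for illustration only

bool isUndefOrZeroElt(int Val) {
  return Val == kSentinelUndef || Val == kSentinelZero;
}

// Every element in Mask[Pos, Pos+Size) is undef, zero, or the expected
// sequential value starting at Low.
bool isSequentialOrUndefOrZero(const std::vector<int> &Mask, unsigned Pos,
                               unsigned Size, int Low) {
  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
    if (!isUndefOrZeroElt(Mask[i]) && Mask[i] != Low)
      return false;
  return true;
}
// e.g. Mask = {4, -2, 6, -1} passes for Pos = 0, Size = 4, Low = 4.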
/// Return true if the specified EXTRACT_SUBVECTOR operand specifies a vector
/// extract that is suitable for instruction that extract 128 or 256 bit vectors
static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
@@ -4399,9 +4350,8 @@ bool X86::isZeroNode(SDValue Elt) {
// Build a vector of constants
// Use an UNDEF node if MaskElt == -1.
// Split 64-bit constants in 32-bit mode.
-static SDValue getConstVector(ArrayRef<int> Values, MVT VT,
- SelectionDAG &DAG,
- SDLoc dl, bool IsMask = false) {
+static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
+ const SDLoc &dl, bool IsMask = false) {
SmallVector<SDValue, 32> Ops;
bool Split = false;
@@ -4424,63 +4374,40 @@ static SDValue getConstVector(ArrayRef<int> Values, MVT VT,
Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
DAG.getConstant(0, dl, EltVT));
}
- SDValue ConstsNode = DAG.getNode(ISD::BUILD_VECTOR, dl, ConstVecVT, Ops);
+ SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
if (Split)
ConstsNode = DAG.getBitcast(VT, ConstsNode);
return ConstsNode;
}
/// Returns a vector of specified type with all zero elements.
-static SDValue getZeroVector(MVT VT, const X86Subtarget *Subtarget,
- SelectionDAG &DAG, SDLoc dl) {
- assert(VT.isVector() && "Expected a vector type");
-
- // Always build SSE zero vectors as <4 x i32> bitcasted
- // to their dest type. This ensures they get CSE'd.
+static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG, const SDLoc &dl) {
+ assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
+ VT.getVectorElementType() == MVT::i1) &&
+ "Unexpected vector type");
+
+ // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
+ // type. This ensures they get CSE'd. But if the integer type is not
+ // available, use a floating-point +0.0 instead.
SDValue Vec;
- if (VT.is128BitVector()) { // SSE
- if (Subtarget->hasSSE2()) { // SSE2
- SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- } else { // SSE1
- SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32);
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
- }
- } else if (VT.is256BitVector()) { // AVX
- if (Subtarget->hasInt256()) { // AVX2
- SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
- } else {
- // 256-bit logic and arithmetic instructions in AVX are all
- // floating-point, no support for integer ops. Emit fp zeroed vectors.
- SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32);
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
- }
- } else if (VT.is512BitVector()) { // AVX-512
- SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
- Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
+ if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
+ Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
} else if (VT.getVectorElementType() == MVT::i1) {
-
- assert((Subtarget->hasBWI() || VT.getVectorNumElements() <= 16)
- && "Unexpected vector type");
- assert((Subtarget->hasVLX() || VT.getVectorNumElements() >= 8)
- && "Unexpected vector type");
- SDValue Cst = DAG.getConstant(0, dl, MVT::i1);
- SmallVector<SDValue, 64> Ops(VT.getVectorNumElements(), Cst);
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
- } else
- llvm_unreachable("Unexpected vector type");
-
+ assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
+ "Unexpected vector type");
+ assert((Subtarget.hasVLX() || VT.getVectorNumElements() >= 8) &&
+ "Unexpected vector type");
+ Vec = DAG.getConstant(0, dl, VT);
+ } else {
+ unsigned Num32BitElts = VT.getSizeInBits() / 32;
+ Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
+ }
return DAG.getBitcast(VT, Vec);
}
-static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl,
- unsigned vectorWidth) {
+static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
+ const SDLoc &dl, unsigned vectorWidth) {
assert((vectorWidth == 128 || vectorWidth == 256) &&
"Unsupported vector width");
EVT VT = Vec.getValueType();
@@ -4490,7 +4417,7 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
VT.getVectorNumElements()/Factor);
// Extract from UNDEF is UNDEF.
- if (Vec.getOpcode() == ISD::UNDEF)
+ if (Vec.isUndef())
return DAG.getUNDEF(ResultVT);
// Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
@@ -4503,8 +4430,8 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
// If the input is a buildvector just emit a smaller one.
if (Vec.getOpcode() == ISD::BUILD_VECTOR)
- return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
- makeArrayRef(Vec->op_begin() + IdxVal, ElemsPerChunk));
+ return DAG.getNode(ISD::BUILD_VECTOR,
+ dl, ResultVT, makeArrayRef(Vec->op_begin() + IdxVal, ElemsPerChunk));
SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
@@ -4516,27 +4443,27 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
-static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl) {
+static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl) {
assert((Vec.getValueType().is256BitVector() ||
Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
- return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
+ return extractSubVector(Vec, IdxVal, DAG, dl, 128);
}
/// Generate a DAG to grab 256-bits from a 512-bit vector.
-static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl) {
+static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
- return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
+ return extractSubVector(Vec, IdxVal, DAG, dl, 256);
}
-static SDValue InsertSubVector(SDValue Result, SDValue Vec,
- unsigned IdxVal, SelectionDAG &DAG,
- SDLoc dl, unsigned vectorWidth) {
+static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl,
+ unsigned vectorWidth) {
assert((vectorWidth == 128 || vectorWidth == 256) &&
"Unsupported vector width");
  // Inserting an UNDEF subvector leaves Result unchanged.
- if (Vec.getOpcode() == ISD::UNDEF)
+ if (Vec.isUndef())
return Result;
EVT VT = Vec.getValueType();
EVT ElVT = VT.getVectorElementType();
@@ -4560,8 +4487,8 @@ static SDValue InsertSubVector(SDValue Result, SDValue Vec,
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
-static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl) {
+static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
// For insertion into the zero index (low half) of a 256-bit vector, it is
@@ -4570,7 +4497,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
// extend the subvector to the size of the result vector. Make sure that
// we are not recursing on that node by checking for undef here.
if (IdxVal == 0 && Result.getValueType().is256BitVector() &&
- Result.getOpcode() != ISD::UNDEF) {
+ !Result.isUndef()) {
EVT ResultVT = Result.getValueType();
SDValue ZeroIndex = DAG.getIntPtrConstant(0, dl);
SDValue Undef = DAG.getUNDEF(ResultVT);
@@ -4607,17 +4534,18 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
return DAG.getBitcast(ResultVT, Vec256);
}
- return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
+ return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}
-static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl) {
+static SDValue insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
- return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
+ return insertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}
/// Insert i1-subvector to i1-vector.
-static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
+static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
@@ -4647,43 +4575,71 @@ static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
// 3. Subvector should be inserted in the middle (for example v2i1
// to v16i1, index 2)
+  // Extend to a natively supported kshift width.
+ MVT MinVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
+ MVT WideOpVT = OpVT;
+ if (OpVT.getSizeInBits() < MinVT.getStoreSizeInBits())
+ WideOpVT = MinVT;
+
SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
- SDValue Undef = DAG.getUNDEF(OpVT);
- SDValue WideSubVec =
- DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef, SubVec, ZeroIdx);
- if (Vec.isUndef())
- return DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
- DAG.getConstant(IdxVal, dl, MVT::i8));
+ SDValue Undef = DAG.getUNDEF(WideOpVT);
+ SDValue WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+ Undef, SubVec, ZeroIdx);
+
+  // Extract the sub-vector if required.
+ auto ExtractSubVec = [&](SDValue V) {
+ return (WideOpVT == OpVT) ? V : DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
+ OpVT, V, ZeroIdx);
+ };
+
+ if (Vec.isUndef()) {
+ if (IdxVal != 0) {
+ SDValue ShiftBits = DAG.getConstant(IdxVal, dl, MVT::i8);
+ WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec, ShiftBits);
+ }
+ return ExtractSubVec(WideSubVec);
+ }
if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
+ NumElems = WideOpVT.getVectorNumElements();
unsigned ShiftLeft = NumElems - SubVecNumElems;
unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
- WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
- DAG.getConstant(ShiftLeft, dl, MVT::i8));
- return ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, OpVT, WideSubVec,
- DAG.getConstant(ShiftRight, dl, MVT::i8)) : WideSubVec;
+ Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+ DAG.getConstant(ShiftLeft, dl, MVT::i8));
+ Vec = ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec,
+ DAG.getConstant(ShiftRight, dl, MVT::i8)) : Vec;
+ return ExtractSubVec(Vec);
}
if (IdxVal == 0) {
// Zero lower bits of the Vec
SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
- Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
- Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
- // Merge them together
- return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+ Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
+ Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+ Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+ // Merge them together, SubVec should be zero extended.
+ WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+ getZeroVector(WideOpVT, Subtarget, DAG, dl),
+ SubVec, ZeroIdx);
+ Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+ return ExtractSubVec(Vec);
}
// Simple case when we put subvector in the upper part
if (IdxVal + SubVecNumElems == NumElems) {
// Zero upper bits of the Vec
- WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec,
- DAG.getConstant(IdxVal, dl, MVT::i8));
+ WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+ DAG.getConstant(IdxVal, dl, MVT::i8));
SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
- Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
- Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
- return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+ Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
+ Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+ Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+ Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+ return ExtractSubVec(Vec);
}
// Subvector should be inserted in the middle - use shuffle
+ WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef,
+ SubVec, ZeroIdx);
SmallVector<int, 64> Mask;
for (unsigned i = 0; i < NumElems; ++i)
Mask.push_back(i >= IdxVal && i < IdxVal + SubVecNumElems ?
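The i1-vector insertion path above works on whole mask registers with kshift-style shifts and an OR. The same idea on a plain integer bitmask, as a hedged sketch (names invented; the widening/legality details of the real lowering are elided):

#include <cstdint>

// Clear NumSubBits bits of Vec starting at Idx, then shift Sub into that
// window and merge it in - the scalar analogue of the shift/OR sequences above.
uint64_t insertBitSubvector(uint64_t Vec, uint64_t Sub, unsigned NumSubBits,
                            unsigned Idx) {
  uint64_t Window =
      ((NumSubBits >= 64) ? ~0ull : ((1ull << NumSubBits) - 1)) << Idx;
  return (Vec & ~Window) | ((Sub << Idx) & Window);
}
// insertBitSubvector(0xF0, 0b01, 2, 1) == 0xF2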
@@ -4695,103 +4651,206 @@ static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
-static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
+static SDValue concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
unsigned NumElems, SelectionDAG &DAG,
- SDLoc dl) {
- SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
- return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
+ const SDLoc &dl) {
+ SDValue V = insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
+ return insert128BitVector(V, V2, NumElems / 2, DAG, dl);
}
-static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
+static SDValue concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
unsigned NumElems, SelectionDAG &DAG,
- SDLoc dl) {
- SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
- return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
+ const SDLoc &dl) {
+ SDValue V = insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
+ return insert256BitVector(V, V2, NumElems / 2, DAG, dl);
}
/// Returns a vector of specified type with all bits set.
/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
-/// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately.
+/// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
/// Then bitcast to their original type, ensuring they get CSE'd.
-static SDValue getOnesVector(EVT VT, const X86Subtarget *Subtarget,
- SelectionDAG &DAG, SDLoc dl) {
- assert(VT.isVector() && "Expected a vector type");
+static SDValue getOnesVector(EVT VT, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG, const SDLoc &dl) {
+ assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
+ "Expected a 128/256/512-bit vector type");
- SDValue Cst = DAG.getConstant(~0U, dl, MVT::i32);
+ APInt Ones = APInt::getAllOnesValue(32);
+ unsigned NumElts = VT.getSizeInBits() / 32;
SDValue Vec;
- if (VT.is512BitVector()) {
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
- Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
- } else if (VT.is256BitVector()) {
- if (Subtarget->hasInt256()) { // AVX2
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
- } else { // AVX
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
- }
- } else if (VT.is128BitVector()) {
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- } else
- llvm_unreachable("Unexpected vector type");
-
+ if (!Subtarget.hasInt256() && NumElts == 8) {
+ Vec = DAG.getConstant(Ones, dl, MVT::v4i32);
+ Vec = concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
+ } else {
+ Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
+ }
return DAG.getBitcast(VT, Vec);
}
/// Returns a vector_shuffle node for an unpackl operation.
-static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
- SDValue V2) {
+static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
+ SDValue V1, SDValue V2) {
+ assert(VT.is128BitVector() && "Expected a 128-bit vector type");
unsigned NumElems = VT.getVectorNumElements();
- SmallVector<int, 8> Mask;
+ SmallVector<int, 8> Mask(NumElems);
for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
- Mask.push_back(i);
- Mask.push_back(i + NumElems);
+ Mask[i * 2] = i;
+ Mask[i * 2 + 1] = i + NumElems;
}
- return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
+ return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
/// Returns a vector_shuffle node for an unpackh operation.
-static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
- SDValue V2) {
+static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
+ SDValue V1, SDValue V2) {
+ assert(VT.is128BitVector() && "Expected a 128-bit vector type");
unsigned NumElems = VT.getVectorNumElements();
- SmallVector<int, 8> Mask;
+ SmallVector<int, 8> Mask(NumElems);
for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
- Mask.push_back(i + Half);
- Mask.push_back(i + NumElems + Half);
+ Mask[i * 2] = i + Half;
+ Mask[i * 2 + 1] = i + NumElems + Half;
}
- return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
+ return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
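The two helpers above now fill the mask by index instead of repeated push_back; the interleave pattern they produce is unchanged. A standalone sketch of those masks:

#include <vector>

// Interleave the low (or high) halves of two NumElems-wide vectors:
// unpackl for 4 elements is {0,4,1,5}, unpackh is {2,6,3,7}.
std::vector<int> unpackMask(unsigned NumElems, bool High) {
  std::vector<int> Mask(NumElems);
  unsigned Base = High ? NumElems / 2 : 0;
  for (unsigned i = 0, e = NumElems / 2; i != e; ++i) {
    Mask[i * 2] = Base + i;                // element from V1
    Mask[i * 2 + 1] = Base + i + NumElems; // matching element from V2
  }
  return Mask;
}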
/// Return a vector_shuffle blending the specified vector into a zero or undef vector.
/// This produces a shuffle where the low element of V2 is swizzled into the
/// zero/undef vector, landing at element Idx.
/// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
-static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
+static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
bool IsZero,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = V2.getSimpleValueType();
SDValue V1 = IsZero
? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
- unsigned NumElems = VT.getVectorNumElements();
- SmallVector<int, 16> MaskVec;
- for (unsigned i = 0; i != NumElems; ++i)
+ int NumElems = VT.getVectorNumElements();
+ SmallVector<int, 16> MaskVec(NumElems);
+ for (int i = 0; i != NumElems; ++i)
// If this is the insertion idx, put the low elt of V2 here.
- MaskVec.push_back(i == Idx ? NumElems : i);
- return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
+ MaskVec[i] = (i == Idx) ? NumElems : i;
+ return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
+}
+
+static SDValue peekThroughBitcasts(SDValue V) {
+ while (V.getNode() && V.getOpcode() == ISD::BITCAST)
+ V = V.getOperand(0);
+ return V;
+}
+
+static bool getTargetShuffleMaskIndices(SDValue MaskNode,
+ unsigned MaskEltSizeInBits,
+ SmallVectorImpl<uint64_t> &RawMask) {
+ MaskNode = peekThroughBitcasts(MaskNode);
+
+ MVT VT = MaskNode.getSimpleValueType();
+ assert(VT.isVector() && "Can't produce a non-vector with a build_vector!");
+
+ // Split an APInt element into MaskEltSizeInBits sized pieces and
+ // insert into the shuffle mask.
+ auto SplitElementToMask = [&](APInt Element) {
+ // Note that this is x86 and so always little endian: the low byte is
+ // the first byte of the mask.
+ int Split = VT.getScalarSizeInBits() / MaskEltSizeInBits;
+ for (int i = 0; i < Split; ++i) {
+ APInt RawElt = Element.getLoBits(MaskEltSizeInBits);
+ Element = Element.lshr(MaskEltSizeInBits);
+ RawMask.push_back(RawElt.getZExtValue());
+ }
+ };
+
+ if (MaskNode.getOpcode() == X86ISD::VBROADCAST) {
+ // TODO: Handle (MaskEltSizeInBits % VT.getScalarSizeInBits()) == 0
+ // TODO: Handle (VT.getScalarSizeInBits() % MaskEltSizeInBits) == 0
+ if (VT.getScalarSizeInBits() != MaskEltSizeInBits)
+ return false;
+ if (auto *CN = dyn_cast<ConstantSDNode>(MaskNode.getOperand(0))) {
+ const APInt &MaskElement = CN->getAPIntValue();
+ for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+ APInt RawElt = MaskElement.getLoBits(MaskEltSizeInBits);
+ RawMask.push_back(RawElt.getZExtValue());
+ }
+ }
+ return false;
+ }
+
+ if (MaskNode.getOpcode() == X86ISD::VZEXT_MOVL &&
+ MaskNode.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR) {
+
+ // TODO: Handle (MaskEltSizeInBits % VT.getScalarSizeInBits()) == 0
+ if ((VT.getScalarSizeInBits() % MaskEltSizeInBits) != 0)
+ return false;
+ unsigned ElementSplit = VT.getScalarSizeInBits() / MaskEltSizeInBits;
+
+ SDValue MaskOp = MaskNode.getOperand(0).getOperand(0);
+ if (auto *CN = dyn_cast<ConstantSDNode>(MaskOp)) {
+ SplitElementToMask(CN->getAPIntValue());
+ RawMask.append((VT.getVectorNumElements() - 1) * ElementSplit, 0);
+ return true;
+ }
+ return false;
+ }
+
+ if (MaskNode.getOpcode() != ISD::BUILD_VECTOR)
+ return false;
+
+ // We can always decode if the buildvector is all zero constants,
+ // but can't use isBuildVectorAllZeros as it might contain UNDEFs.
+ if (llvm::all_of(MaskNode->ops(), X86::isZeroNode)) {
+ RawMask.append(VT.getSizeInBits() / MaskEltSizeInBits, 0);
+ return true;
+ }
+
+ // TODO: Handle (MaskEltSizeInBits % VT.getScalarSizeInBits()) == 0
+ if ((VT.getScalarSizeInBits() % MaskEltSizeInBits) != 0)
+ return false;
+
+ for (SDValue Op : MaskNode->ops()) {
+ if (auto *CN = dyn_cast<ConstantSDNode>(Op.getNode()))
+ SplitElementToMask(CN->getAPIntValue());
+ else if (auto *CFN = dyn_cast<ConstantFPSDNode>(Op.getNode()))
+ SplitElementToMask(CFN->getValueAPF().bitcastToAPInt());
+ else
+ return false;
+ }
+
+ return true;
+}
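SplitElementToMask above decodes a wide constant into MaskEltSizeInBits pieces, low bits first. The same arithmetic with a plain uint64_t standing in for APInt (illustration only, not the patch code):

#include <cstdint>
#include <vector>

void splitElementToMask(uint64_t Element, unsigned ScalarBits,
                        unsigned MaskEltBits, std::vector<uint64_t> &RawMask) {
  uint64_t EltMask =
      (MaskEltBits >= 64) ? ~0ull : ((1ull << MaskEltBits) - 1);
  for (unsigned i = 0, e = ScalarBits / MaskEltBits; i != e; ++i) {
    RawMask.push_back(Element & EltMask); // x86 is little endian: low piece first
    Element >>= MaskEltBits;
  }
}
// splitElementToMask(0x03020100, 32, 8, M) appends {0x00, 0x01, 0x02, 0x03}.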
+
+static const Constant *getTargetShuffleMaskConstant(SDValue MaskNode) {
+ MaskNode = peekThroughBitcasts(MaskNode);
+
+ auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
+ if (!MaskLoad)
+ return nullptr;
+
+ SDValue Ptr = MaskLoad->getBasePtr();
+ if (Ptr->getOpcode() == X86ISD::Wrapper ||
+ Ptr->getOpcode() == X86ISD::WrapperRIP)
+ Ptr = Ptr->getOperand(0);
+
+ auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
+ if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
+ return nullptr;
+
+ return dyn_cast<Constant>(MaskCP->getConstVal());
}
/// Calculates the shuffle mask corresponding to the target-specific opcode.
-/// Returns true if the Mask could be calculated. Sets IsUnary to true if only
-/// uses one source. Note that this will set IsUnary for shuffles which use a
-/// single input multiple times, and in those cases it will
-/// adjust the mask to only have indices within that single input.
+/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
+/// operands in \p Ops, and returns true.
+/// Sets \p IsUnary to true if only one source is used. Note that this will set
+/// IsUnary for shuffles which use a single input multiple times, and in those
+/// cases it will adjust the mask to only have indices within that single input.
+/// It is an error to call this with non-empty Mask/Ops vectors.
static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
+ SmallVectorImpl<SDValue> &Ops,
SmallVectorImpl<int> &Mask, bool &IsUnary) {
unsigned NumElems = VT.getVectorNumElements();
SDValue ImmN;
+ assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
+ assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
+
IsUnary = false;
bool IsFakeUnary = false;
switch(N->getOpcode()) {
@@ -4826,9 +4885,22 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::PALIGNR:
+ assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
break;
+ case X86ISD::VSHLDQ:
+ assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
+ ImmN = N->getOperand(N->getNumOperands() - 1);
+ DecodePSLLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ IsUnary = true;
+ break;
+ case X86ISD::VSRLDQ:
+ assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
+ ImmN = N->getOperand(N->getNumOperands() - 1);
+ DecodePSRLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ IsUnary = true;
+ break;
case X86ISD::PSHUFD:
case X86ISD::VPERMILPI:
ImmN = N->getOperand(N->getNumOperands()-1);
@@ -4845,70 +4917,51 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
+ case X86ISD::VZEXT_MOVL:
+ DecodeZeroMoveLowMask(VT, Mask);
+ IsUnary = true;
+ break;
+ case X86ISD::VBROADCAST: {
+ // We only decode broadcasts of same-sized vectors at the moment.
+ if (N->getOperand(0).getValueType() == VT) {
+ DecodeVectorBroadcast(VT, Mask);
+ IsUnary = true;
+ break;
+ }
+ return false;
+ }
+ case X86ISD::VPERMILPV: {
+ IsUnary = true;
+ SDValue MaskNode = N->getOperand(1);
+ unsigned MaskEltSize = VT.getScalarSizeInBits();
+ SmallVector<uint64_t, 32> RawMask;
+ if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
+ DecodeVPERMILPMask(VT, RawMask, Mask);
+ break;
+ }
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
+ DecodeVPERMILPMask(C, MaskEltSize, Mask);
+ break;
+ }
+ return false;
+ }
case X86ISD::PSHUFB: {
IsUnary = true;
SDValue MaskNode = N->getOperand(1);
- while (MaskNode->getOpcode() == ISD::BITCAST)
- MaskNode = MaskNode->getOperand(0);
-
- if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
- // If we have a build-vector, then things are easy.
- MVT VT = MaskNode.getSimpleValueType();
- assert(VT.isVector() &&
- "Can't produce a non-vector with a build_vector!");
- if (!VT.isInteger())
- return false;
-
- int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
-
- SmallVector<uint64_t, 32> RawMask;
- for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
- SDValue Op = MaskNode->getOperand(i);
- if (Op->getOpcode() == ISD::UNDEF) {
- RawMask.push_back((uint64_t)SM_SentinelUndef);
- continue;
- }
- auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
- if (!CN)
- return false;
- APInt MaskElement = CN->getAPIntValue();
-
- // We now have to decode the element which could be any integer size and
- // extract each byte of it.
- for (int j = 0; j < NumBytesPerElement; ++j) {
- // Note that this is x86 and so always little endian: the low byte is
- // the first byte of the mask.
- RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
- MaskElement = MaskElement.lshr(8);
- }
- }
+ SmallVector<uint64_t, 32> RawMask;
+ if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
DecodePSHUFBMask(RawMask, Mask);
break;
}
-
- auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
- if (!MaskLoad)
- return false;
-
- SDValue Ptr = MaskLoad->getBasePtr();
- if (Ptr->getOpcode() == X86ISD::Wrapper ||
- Ptr->getOpcode() == X86ISD::WrapperRIP)
- Ptr = Ptr->getOperand(0);
-
- auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
- if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
- return false;
-
- if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
DecodePSHUFBMask(C, Mask);
break;
}
-
return false;
}
case X86ISD::VPERMI:
ImmN = N->getOperand(N->getNumOperands()-1);
- DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ DecodeVPERMMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
case X86ISD::MOVSS:
@@ -4937,110 +4990,63 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
case X86ISD::MOVLPS:
// Not yet implemented
return false;
+ case X86ISD::VPERMIL2: {
+ IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
+ unsigned MaskEltSize = VT.getScalarSizeInBits();
+ SDValue MaskNode = N->getOperand(2);
+ SDValue CtrlNode = N->getOperand(3);
+ if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
+ unsigned CtrlImm = CtrlOp->getZExtValue();
+ SmallVector<uint64_t, 32> RawMask;
+ if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
+ DecodeVPERMIL2PMask(VT, CtrlImm, RawMask, Mask);
+ break;
+ }
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
+ DecodeVPERMIL2PMask(C, CtrlImm, MaskEltSize, Mask);
+ break;
+ }
+ }
+ return false;
+ }
+ case X86ISD::VPPERM: {
+ IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
+ SDValue MaskNode = N->getOperand(2);
+ SmallVector<uint64_t, 32> RawMask;
+ if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
+ DecodeVPPERMMask(RawMask, Mask);
+ break;
+ }
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
+ DecodeVPPERMMask(C, Mask);
+ break;
+ }
+ return false;
+ }
case X86ISD::VPERMV: {
IsUnary = true;
+ // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
+ Ops.push_back(N->getOperand(1));
SDValue MaskNode = N->getOperand(0);
- while (MaskNode->getOpcode() == ISD::BITCAST)
- MaskNode = MaskNode->getOperand(0);
-
- unsigned MaskLoBits = Log2_64(VT.getVectorNumElements());
SmallVector<uint64_t, 32> RawMask;
- if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
- // If we have a build-vector, then things are easy.
- assert(MaskNode.getSimpleValueType().isInteger() &&
- MaskNode.getSimpleValueType().getVectorNumElements() ==
- VT.getVectorNumElements());
-
- for (unsigned i = 0; i < MaskNode->getNumOperands(); ++i) {
- SDValue Op = MaskNode->getOperand(i);
- if (Op->getOpcode() == ISD::UNDEF)
- RawMask.push_back((uint64_t)SM_SentinelUndef);
- else if (isa<ConstantSDNode>(Op)) {
- APInt MaskElement = cast<ConstantSDNode>(Op)->getAPIntValue();
- RawMask.push_back(MaskElement.getLoBits(MaskLoBits).getZExtValue());
- } else
- return false;
- }
+ unsigned MaskEltSize = VT.getScalarSizeInBits();
+ if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
DecodeVPERMVMask(RawMask, Mask);
break;
}
- if (MaskNode->getOpcode() == X86ISD::VBROADCAST) {
- unsigned NumEltsInMask = MaskNode->getNumOperands();
- MaskNode = MaskNode->getOperand(0);
- if (auto *CN = dyn_cast<ConstantSDNode>(MaskNode)) {
- APInt MaskEltValue = CN->getAPIntValue();
- for (unsigned i = 0; i < NumEltsInMask; ++i)
- RawMask.push_back(MaskEltValue.getLoBits(MaskLoBits).getZExtValue());
- DecodeVPERMVMask(RawMask, Mask);
- break;
- }
- // It may be a scalar load
- }
-
- auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
- if (!MaskLoad)
- return false;
-
- SDValue Ptr = MaskLoad->getBasePtr();
- if (Ptr->getOpcode() == X86ISD::Wrapper ||
- Ptr->getOpcode() == X86ISD::WrapperRIP)
- Ptr = Ptr->getOperand(0);
-
- auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
- if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
- return false;
-
- if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
DecodeVPERMVMask(C, VT, Mask);
break;
}
return false;
}
case X86ISD::VPERMV3: {
- IsUnary = false;
+ IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
+ // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
+ Ops.push_back(N->getOperand(0));
+ Ops.push_back(N->getOperand(2));
SDValue MaskNode = N->getOperand(1);
- while (MaskNode->getOpcode() == ISD::BITCAST)
- MaskNode = MaskNode->getOperand(1);
-
- if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
- // If we have a build-vector, then things are easy.
- assert(MaskNode.getSimpleValueType().isInteger() &&
- MaskNode.getSimpleValueType().getVectorNumElements() ==
- VT.getVectorNumElements());
-
- SmallVector<uint64_t, 32> RawMask;
- unsigned MaskLoBits = Log2_64(VT.getVectorNumElements()*2);
-
- for (unsigned i = 0; i < MaskNode->getNumOperands(); ++i) {
- SDValue Op = MaskNode->getOperand(i);
- if (Op->getOpcode() == ISD::UNDEF)
- RawMask.push_back((uint64_t)SM_SentinelUndef);
- else {
- auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
- if (!CN)
- return false;
- APInt MaskElement = CN->getAPIntValue();
- RawMask.push_back(MaskElement.getLoBits(MaskLoBits).getZExtValue());
- }
- }
- DecodeVPERMV3Mask(RawMask, Mask);
- break;
- }
-
- auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
- if (!MaskLoad)
- return false;
-
- SDValue Ptr = MaskLoad->getBasePtr();
- if (Ptr->getOpcode() == X86ISD::Wrapper ||
- Ptr->getOpcode() == X86ISD::WrapperRIP)
- Ptr = Ptr->getOperand(0);
-
- auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
- if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
- return false;
-
- if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
DecodeVPERMV3Mask(C, VT, Mask);
break;
}
@@ -5055,8 +5061,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
// Check if we're getting a shuffle mask with zero'd elements.
if (!AllowSentinelZero)
- if (std::any_of(Mask.begin(), Mask.end(),
- [](int M){ return M == SM_SentinelZero; }))
+ if (llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
return false;
// If we have a fake unary shuffle, the shuffle mask is spread across two
@@ -5067,6 +5072,123 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
if (M >= (int)Mask.size())
M -= Mask.size();
+ // If we didn't already add operands in the opcode-specific code, default to
+ // adding 1 or 2 operands starting at 0.
+ if (Ops.empty()) {
+ Ops.push_back(N->getOperand(0));
+ if (!IsUnary || IsFakeUnary)
+ Ops.push_back(N->getOperand(1));
+ }
+
+ return true;
+}
+
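When both shuffle operands are the same node (the IsFakeUnary case handled above), the mask is rewritten so every index refers to the single input. A standalone sketch of that normalization:

#include <vector>

// With identical inputs, element NumElems + i of the concatenated pair is the
// same value as element i, so fold second-input indices back into the first.
void foldFakeUnaryMask(std::vector<int> &Mask) {
  int NumElems = static_cast<int>(Mask.size());
  for (int &M : Mask)
    if (M >= NumElems)
      M -= NumElems;
}
// {0, 5, 2, 7} over identical 4-element inputs becomes {0, 1, 2, 3}.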
+/// Check a target shuffle mask's inputs to see if we can set any values to
+/// SM_SentinelZero - this is for elements that are known to be zero
+/// (not just zeroable) from their inputs.
+/// Returns true if the target shuffle mask was decoded.
+static bool setTargetShuffleZeroElements(SDValue N,
+ SmallVectorImpl<int> &Mask,
+ SmallVectorImpl<SDValue> &Ops) {
+ bool IsUnary;
+ if (!isTargetShuffle(N.getOpcode()))
+ return false;
+ if (!getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), true, Ops,
+ Mask, IsUnary))
+ return false;
+
+ SDValue V1 = Ops[0];
+ SDValue V2 = IsUnary ? V1 : Ops[1];
+
+ V1 = peekThroughBitcasts(V1);
+ V2 = peekThroughBitcasts(V2);
+
+ for (int i = 0, Size = Mask.size(); i < Size; ++i) {
+ int M = Mask[i];
+
+ // Already decoded as SM_SentinelZero / SM_SentinelUndef.
+ if (M < 0)
+ continue;
+
+ // Determine shuffle input and normalize the mask.
+ SDValue V = M < Size ? V1 : V2;
+ M %= Size;
+
+ // We are referencing an UNDEF input.
+ if (V.isUndef()) {
+ Mask[i] = SM_SentinelUndef;
+ continue;
+ }
+
+ // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
+ if (V.getOpcode() != ISD::BUILD_VECTOR)
+ continue;
+
+    // If the BUILD_VECTOR has fewer elements than the mask, then the (larger)
+    // source element covering this mask element must be UNDEF/ZERO.
+ // TODO: Is it worth testing the individual bits of a constant?
+ if ((Size % V.getNumOperands()) == 0) {
+ int Scale = Size / V->getNumOperands();
+ SDValue Op = V.getOperand(M / Scale);
+ if (Op.isUndef())
+ Mask[i] = SM_SentinelUndef;
+ else if (X86::isZeroNode(Op))
+ Mask[i] = SM_SentinelZero;
+ continue;
+ }
+
+    // If the BUILD_VECTOR has more elements than the mask, then the (smaller)
+    // source elements covered by this mask element must be all UNDEF or all ZERO.
+ if ((V.getNumOperands() % Size) == 0) {
+ int Scale = V->getNumOperands() / Size;
+ bool AllUndef = true;
+ bool AllZero = true;
+ for (int j = 0; j < Scale; ++j) {
+ SDValue Op = V.getOperand((M * Scale) + j);
+ AllUndef &= Op.isUndef();
+ AllZero &= X86::isZeroNode(Op);
+ }
+ if (AllUndef)
+ Mask[i] = SM_SentinelUndef;
+ else if (AllZero)
+ Mask[i] = SM_SentinelZero;
+ continue;
+ }
+ }
+
+ return true;
+}
+
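The BUILD_VECTOR scan above has to bridge different element granularities between the shuffle mask and its source. A sketch of the index arithmetic for the fewer-but-larger source elements case (helper name invented for illustration):

#include <cassert>

// Mask element M (out of Size mask elements) is covered by BUILD_VECTOR
// operand M / Scale when the build vector has NumOps larger elements.
int operandForMaskElt(int M, int Size, int NumOps) {
  assert(NumOps > 0 && Size % NumOps == 0 && "expected a whole scale factor");
  int Scale = Size / NumOps;
  return M / Scale;
}
// A v16i8 mask element 6 over a v4i32 build vector maps to operand 6 / 4 = 1.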
+/// Calls setTargetShuffleZeroElements to resolve a target shuffle mask's inputs
+/// and set the SM_SentinelUndef and SM_SentinelZero values. Then check the
+/// remaining input indices in case we now have a unary shuffle and adjust the
+/// Op0/Op1 inputs accordingly.
+/// Returns true if the target shuffle mask was decoded.
+static bool resolveTargetShuffleInputs(SDValue Op, SDValue &Op0, SDValue &Op1,
+ SmallVectorImpl<int> &Mask) {
+ SmallVector<SDValue, 2> Ops;
+ if (!setTargetShuffleZeroElements(Op, Mask, Ops))
+ return false;
+
+ int NumElts = Mask.size();
+ bool Op0InUse = std::any_of(Mask.begin(), Mask.end(), [NumElts](int Idx) {
+ return 0 <= Idx && Idx < NumElts;
+ });
+ bool Op1InUse = std::any_of(Mask.begin(), Mask.end(),
+ [NumElts](int Idx) { return NumElts <= Idx; });
+
+ Op0 = Op0InUse ? Ops[0] : SDValue();
+ Op1 = Op1InUse ? Ops[1] : SDValue();
+
+ // We're only using Op1 - commute the mask and inputs.
+ if (!Op0InUse && Op1InUse) {
+ for (int &M : Mask)
+ if (NumElts <= M)
+ M -= NumElts;
+ Op0 = Op1;
+ Op1 = SDValue();
+ }
+
return true;
}
@@ -5097,19 +5219,24 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
// Recurse into target specific vector shuffles to find scalars.
if (isTargetShuffle(Opcode)) {
MVT ShufVT = V.getSimpleValueType();
+ MVT ShufSVT = ShufVT.getVectorElementType();
int NumElems = (int)ShufVT.getVectorNumElements();
SmallVector<int, 16> ShuffleMask;
+ SmallVector<SDValue, 16> ShuffleOps;
bool IsUnary;
- if (!getTargetShuffleMask(N, ShufVT, false, ShuffleMask, IsUnary))
+ if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
return SDValue();
int Elt = ShuffleMask[Index];
+ if (Elt == SM_SentinelZero)
+ return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
+ : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
if (Elt == SM_SentinelUndef)
- return DAG.getUNDEF(ShufVT.getVectorElementType());
+ return DAG.getUNDEF(ShufSVT);
assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
- SDValue NewV = (Elt < NumElems) ? N->getOperand(0) : N->getOperand(1);
+ SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
Depth+1);
}
@@ -5138,7 +5265,7 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
- const X86Subtarget* Subtarget,
+ const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
if (NumNonZero > 8)
return SDValue();
@@ -5148,7 +5275,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
bool First = true;
// SSE4.1 - use PINSRB to insert each byte directly.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
for (unsigned i = 0; i < 16; ++i) {
bool isNonZero = (NonZeros & (1 << i)) != 0;
if (isNonZero) {
@@ -5208,7 +5335,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
- const X86Subtarget* Subtarget,
+ const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
if (NumNonZero > 4)
return SDValue();
@@ -5237,13 +5364,13 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
/// Custom lower build_vector of v4i32 or v4f32.
static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
// Find all zeroable elements.
std::bitset<4> Zeroable;
for (int i=0; i < 4; ++i) {
SDValue Elt = Op->getOperand(i);
- Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
+ Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
}
assert(Zeroable.size() - Zeroable.count() > 1 &&
"We expect at least two non-zero elements!");
@@ -5296,12 +5423,12 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
// Let the shuffle legalizer deal with blend operations.
SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
if (V1.getSimpleValueType() != VT)
- V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
- return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
+ V1 = DAG.getBitcast(VT, V1);
+ return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, Mask);
}
// See if we can lower this build_vector to a INSERTPS.
- if (!Subtarget->hasSSE41())
+ if (!Subtarget.hasSSE41())
return SDValue();
SDValue V2 = Elt.getOperand(0);
@@ -5326,9 +5453,9 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
assert(V1.getNode() && "Expected at least two non-zero elements!");
if (V1.getSimpleValueType() != MVT::v4f32)
- V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
+ V1 = DAG.getBitcast(MVT::v4f32, V1);
if (V2.getSimpleValueType() != MVT::v4f32)
- V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
+ V2 = DAG.getBitcast(MVT::v4f32, V2);
// Ok, we can emit an INSERTPS instruction.
unsigned ZMask = Zeroable.to_ulong();
@@ -5342,11 +5469,11 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
}
/// Return a vector logical shift node.
-static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
- unsigned NumBits, SelectionDAG &DAG,
- const TargetLowering &TLI, SDLoc dl) {
+static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
+ SelectionDAG &DAG, const TargetLowering &TLI,
+ const SDLoc &dl) {
assert(VT.is128BitVector() && "Unknown type for VShift");
- MVT ShVT = MVT::v2i64;
+ MVT ShVT = MVT::v16i8;
unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
SrcOp = DAG.getBitcast(ShVT, SrcOp);
MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(DAG.getDataLayout(), VT);
@@ -5355,8 +5482,8 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
}
-static SDValue
-LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
+static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
+ SelectionDAG &DAG) {
// Check if the scalar load can be widened into a vector load. And if
// the address is "base + cst" see if the cst can be "absorbed" into
@@ -5418,12 +5545,11 @@ LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
- LD->getPointerInfo().getWithOffset(StartOffset),
- false, false, false, 0);
+ LD->getPointerInfo().getWithOffset(StartOffset));
SmallVector<int, 8> Mask(NumElems, EltNo);
- return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
+ return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
}
return SDValue();
@@ -5433,55 +5559,103 @@ LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
/// elements can be replaced by a single large load which has the same value as
/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
///
-/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
-///
-/// FIXME: we'd also like to handle the case where the last elements are zero
-/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
-/// There's even a handy isZeroNode for that purpose.
+/// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
SDLoc &DL, SelectionDAG &DAG,
bool isAfterLegalize) {
unsigned NumElems = Elts.size();
- LoadSDNode *LDBase = nullptr;
- unsigned LastLoadedElt = -1U;
+ int LastLoadedElt = -1;
+ SmallBitVector LoadMask(NumElems, false);
+ SmallBitVector ZeroMask(NumElems, false);
+ SmallBitVector UndefMask(NumElems, false);
- // For each element in the initializer, see if we've found a load or an undef.
- // If we don't find an initial load element, or later load elements are
- // non-consecutive, bail out.
+ // For each element in the initializer, see if we've found a load, zero or an
+ // undef.
for (unsigned i = 0; i < NumElems; ++i) {
- SDValue Elt = Elts[i];
- // Look through a bitcast.
- if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
- Elt = Elt.getOperand(0);
- if (!Elt.getNode() ||
- (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
+ SDValue Elt = peekThroughBitcasts(Elts[i]);
+ if (!Elt.getNode())
return SDValue();
- if (!LDBase) {
- if (Elt.getNode()->getOpcode() == ISD::UNDEF)
- return SDValue();
- LDBase = cast<LoadSDNode>(Elt.getNode());
- LastLoadedElt = i;
- continue;
- }
- if (Elt.getOpcode() == ISD::UNDEF)
- continue;
- LoadSDNode *LD = cast<LoadSDNode>(Elt);
- EVT LdVT = Elt.getValueType();
- // Each loaded element must be the correct fractional portion of the
- // requested vector load.
- if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
- return SDValue();
- if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
+ if (Elt.isUndef())
+ UndefMask[i] = true;
+ else if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode()))
+ ZeroMask[i] = true;
+ else if (ISD::isNON_EXTLoad(Elt.getNode())) {
+ LoadMask[i] = true;
+ LastLoadedElt = i;
+ // Each loaded element must be the correct fractional portion of the
+ // requested vector load.
+ if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits())
+ return SDValue();
+ } else
return SDValue();
- LastLoadedElt = i;
}
+ assert((ZeroMask | UndefMask | LoadMask).count() == NumElems &&
+ "Incomplete element masks");
+
+ // Handle special cases - all undef or undef/zero.
+ if (UndefMask.count() == NumElems)
+ return DAG.getUNDEF(VT);
+
+ // FIXME: Should we return this as a BUILD_VECTOR instead?
+ if ((ZeroMask | UndefMask).count() == NumElems)
+ return VT.isInteger() ? DAG.getConstant(0, DL, VT)
+ : DAG.getConstantFP(0.0, DL, VT);
+
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ int FirstLoadedElt = LoadMask.find_first();
+ SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
+ LoadSDNode *LDBase = cast<LoadSDNode>(EltBase);
+ EVT LDBaseVT = EltBase.getValueType();
+
+ // Consecutive loads can contain UNDEFs but not ZERO elements.
+ // Consecutive loads with UNDEF and ZERO elements require an additional
+ // shuffle stage to clear the ZERO elements.
+ bool IsConsecutiveLoad = true;
+ bool IsConsecutiveLoadWithZeros = true;
+ for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
+ if (LoadMask[i]) {
+ SDValue Elt = peekThroughBitcasts(Elts[i]);
+ LoadSDNode *LD = cast<LoadSDNode>(Elt);
+ if (!DAG.areNonVolatileConsecutiveLoads(
+ LD, LDBase, Elt.getValueType().getStoreSizeInBits() / 8,
+ i - FirstLoadedElt)) {
+ IsConsecutiveLoad = false;
+ IsConsecutiveLoadWithZeros = false;
+ break;
+ }
+ } else if (ZeroMask[i]) {
+ IsConsecutiveLoad = false;
+ }
+ }
+
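To make the two flags concrete, here is a standalone C++ sketch (plain arrays, not the SelectionDAG API; the pattern is invented) of a hypothetical v4i32 build_vector { load a+0, load a+4, zero, load a+12 }: the zero at element 2 clears IsConsecutiveLoad but leaves IsConsecutiveLoadWithZeros set, so the shuffle-based path below can still fire.

    #include <cstdio>

    int main() {
      // Hypothetical v4i32 build_vector: { load a+0, load a+4, zero, load a+12 }.
      bool LoadMask[4] = {true, true, false, true};
      bool ZeroMask[4] = {false, false, true, false};
      int ByteOffset[4] = {0, 4, -1, 12}; // -1: not a load
      bool Consecutive = true, ConsecutiveWithZeros = true;
      for (int i = 1; i < 4; ++i) {
        if (LoadMask[i] && ByteOffset[i] != i * 4) // offset must match index
          Consecutive = ConsecutiveWithZeros = false;
        else if (ZeroMask[i])
          Consecutive = false; // zeros only break the plain-load case
      }
      std::printf("%d %d\n", Consecutive, ConsecutiveWithZeros); // prints: 0 1
    }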
+ auto CreateLoad = [&DAG, &DL](EVT VT, LoadSDNode *LDBase) {
+ auto MMOFlags = LDBase->getMemOperand()->getFlags();
+ assert(!(MMOFlags & MachineMemOperand::MOVolatile) &&
+ "Cannot merge volatile loads.");
+ SDValue NewLd =
+ DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
+ LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
+
+ if (LDBase->hasAnyUseOfValue(1)) {
+ SDValue NewChain =
+ DAG.getNode(ISD::TokenFactor, DL, MVT::Other, SDValue(LDBase, 1),
+ SDValue(NewLd.getNode(), 1));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
+ DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
+ SDValue(NewLd.getNode(), 1));
+ }
+
+ return NewLd;
+ };
+ // LOAD - all consecutive load/undefs (must start/end with a load).
// If we have found an entire vector of loads and undefs, then return a large
- // load of the entire vector width starting at the base pointer. If we found
- // consecutive loads for the low half, generate a vzext_load node.
- if (LastLoadedElt == NumElems - 1) {
+ // load of the entire vector width starting at the base pointer.
+ // If the vector contains zeros, then attempt to shuffle those elements.
+ if (FirstLoadedElt == 0 && LastLoadedElt == (int)(NumElems - 1) &&
+ (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
assert(LDBase && "Did not find base load for merging consecutive loads");
EVT EltVT = LDBase->getValueType(0);
// Ensure that the input vector size for the merged loads matches the
@@ -5489,72 +5663,93 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
return SDValue();
- if (isAfterLegalize &&
- !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
+ if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
return SDValue();
- SDValue NewLd = SDValue();
+ if (IsConsecutiveLoad)
+ return CreateLoad(VT, LDBase);
+
+ // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
+ // vector and a zero vector to clear out the zero elements.
+ if (!isAfterLegalize && NumElems == VT.getVectorNumElements()) {
+ SmallVector<int, 4> ClearMask(NumElems, -1);
+ for (unsigned i = 0; i < NumElems; ++i) {
+ if (ZeroMask[i])
+ ClearMask[i] = i + NumElems;
+ else if (LoadMask[i])
+ ClearMask[i] = i;
+ }
+ SDValue V = CreateLoad(VT, LDBase);
+ SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
+ : DAG.getConstantFP(0.0, DL, VT);
+ return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
+ }
+ }
+
+ int LoadSize =
+ (1 + LastLoadedElt - FirstLoadedElt) * LDBaseVT.getStoreSizeInBits();
+
+ // VZEXT_LOAD - consecutive load/undefs followed by zeros/undefs.
+ if (IsConsecutiveLoad && FirstLoadedElt == 0 && LoadSize == 64 &&
+ ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
+ MVT VecSVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
+ MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / 64);
+ if (TLI.isTypeLegal(VecVT)) {
+ SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
+ SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
+ SDValue ResNode =
+ DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
+ LDBase->getPointerInfo(),
+ LDBase->getAlignment(),
+ false/*isVolatile*/, true/*ReadMem*/,
+ false/*WriteMem*/);
- NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
- LDBase->getPointerInfo(), LDBase->isVolatile(),
- LDBase->isNonTemporal(), LDBase->isInvariant(),
- LDBase->getAlignment());
+ // Make sure the newly-created LOAD is in the same position as LDBase in
+ // terms of dependency. We create a TokenFactor for LDBase and ResNode,
+ // and update uses of LDBase's output chain to use the TokenFactor.
+ if (LDBase->hasAnyUseOfValue(1)) {
+ SDValue NewChain =
+ DAG.getNode(ISD::TokenFactor, DL, MVT::Other, SDValue(LDBase, 1),
+ SDValue(ResNode.getNode(), 1));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
+ DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
+ SDValue(ResNode.getNode(), 1));
+ }
- if (LDBase->hasAnyUseOfValue(1)) {
- SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- SDValue(LDBase, 1),
- SDValue(NewLd.getNode(), 1));
- DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
- DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
- SDValue(NewLd.getNode(), 1));
+ return DAG.getBitcast(VT, ResNode);
}
-
- return NewLd;
}
- //TODO: The code below fires only for for loading the low v2i32 / v2f32
- //of a v4i32 / v4f32. It's probably worth generalizing.
- EVT EltVT = VT.getVectorElementType();
- if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
- DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
- SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
- SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
- SDValue ResNode =
- DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
- LDBase->getPointerInfo(),
- LDBase->getAlignment(),
- false/*isVolatile*/, true/*ReadMem*/,
- false/*WriteMem*/);
-
- // Make sure the newly-created LOAD is in the same position as LDBase in
- // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
- // update uses of LDBase's output chain to use the TokenFactor.
- if (LDBase->hasAnyUseOfValue(1)) {
- SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
- DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
- DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
- SDValue(ResNode.getNode(), 1));
+ // VZEXT_MOVL - consecutive 32-bit load/undefs followed by zeros/undefs.
+ if (IsConsecutiveLoad && FirstLoadedElt == 0 && LoadSize == 32 &&
+ ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
+ MVT VecSVT = VT.isFloatingPoint() ? MVT::f32 : MVT::i32;
+ MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / 32);
+ if (TLI.isTypeLegal(VecVT)) {
+ SDValue V = LastLoadedElt != 0 ? CreateLoad(VecSVT, LDBase)
+ : DAG.getBitcast(VecSVT, EltBase);
+ V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, V);
+ V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, V);
+ return DAG.getBitcast(VT, V);
}
-
- return DAG.getBitcast(VT, ResNode);
}
+
return SDValue();
}
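As a worked example of the two tail paths (a sketch; the arithmetic mirrors the LoadSize computation above): consecutive loads covering only the low 64 or 32 bits of the vector, with the remainder zero/undef, select VZEXT_LOAD or VZEXT_MOVL respectively — typically materialized as a MOVQ- or MOVD-style zero-extending load.

    #include <cassert>

    int main() {
      // v4f32 { load, load, zero, zero }: two 32-bit loads -> 64-bit VZEXT_LOAD.
      int First = 0, Last = 1, EltBits = 32;
      assert((1 + Last - First) * EltBits == 64);
      // v4i32 { load, zero, zero, zero }: one 32-bit load -> VZEXT_MOVL.
      Last = 0;
      assert((1 + Last - First) * EltBits == 32);
    }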
-/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
-/// to generate a splat value for the following cases:
+/// Attempt to use the vbroadcast instruction to generate a splat value for the
+/// following cases:
/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
/// 2. A splat shuffle which uses a scalar_to_vector node which comes from
/// a scalar load, or a constant.
/// The VBROADCAST node is returned when a pattern is found,
/// or SDValue() otherwise.
-static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
+static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// VBROADCAST requires AVX.
// TODO: Splats could be generated for non-AVX CPUs using SSE
// instructions, but there's less potential gain for only 128-bit vectors.
- if (!Subtarget->hasAVX())
+ if (!Subtarget.hasAVX())
return SDValue();
MVT VT = Op.getSimpleValueType();
@@ -5604,12 +5799,12 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
Sc.getOpcode() != ISD::BUILD_VECTOR) {
- if (!Subtarget->hasInt256())
+ if (!Subtarget.hasInt256())
return SDValue();
// Use the register form of the broadcast instruction available on AVX2.
if (VT.getSizeInBits() >= 256)
- Sc = Extract128BitVector(Sc, 0, DAG, dl);
+ Sc = extract128BitVector(Sc, 0, DAG, dl);
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
}
@@ -5622,7 +5817,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
// Constants may have multiple users.
// AVX-512 has register version of the broadcast
- bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
+ bool hasRegVer = Subtarget.hasAVX512() && VT.is512BitVector() &&
Ld.getValueType().getSizeInBits() >= 32;
if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
!hasRegVer))
@@ -5647,7 +5842,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
// from the constant pool and not to broadcast it from a scalar.
// But override that restriction when optimizing for size.
// TODO: Check if splatting is recommended for other AVX-capable CPUs.
- if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
+ if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
EVT CVT = Ld.getValueType();
assert(!CVT.isVector() && "Must not broadcast a vector type");
@@ -5656,7 +5851,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
// with AVX2, also splat i8 and i16.
// With pattern matching, the VBROADCAST node may become a VMOVDDUP.
if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
- (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
+ (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
const Constant *C = nullptr;
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
C = CI->getConstantIntValue();
@@ -5671,8 +5866,8 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(
CVT, dl, DAG.getEntryNode(), CP,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, Alignment);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
+ Alignment);
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
@@ -5681,7 +5876,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
bool IsLoad = ISD::isNormalLoad(Ld.getNode());
// Handle AVX2 in-register broadcasts.
- if (!IsLoad && Subtarget->hasInt256() &&
+ if (!IsLoad && Subtarget.hasInt256() &&
(ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
@@ -5690,12 +5885,12 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
return SDValue();
if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
- (Subtarget->hasVLX() && ScalarSize == 64))
+ (Subtarget.hasVLX() && ScalarSize == 64))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The integer check is needed for the 64-bit into 128-bit so it doesn't match
// double since there is no vbroadcastsd xmm
- if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
+ if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
@@ -5801,7 +5996,7 @@ static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
return SDValue();
VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
- SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
+ SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
unsigned Idx = InsertIndices[i];
NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
@@ -5818,7 +6013,7 @@ static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
uint64_t Immediate = 0;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
- if (In.getOpcode() != ISD::UNDEF)
+ if (!In.isUndef())
Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
}
SDLoc dl(Op);
@@ -5835,17 +6030,11 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
"Unexpected type in LowerBUILD_VECTORvXi1!");
SDLoc dl(Op);
- if (ISD::isBuildVectorAllZeros(Op.getNode())) {
- SDValue Cst = DAG.getTargetConstant(0, dl, MVT::i1);
- SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
- }
+ if (ISD::isBuildVectorAllZeros(Op.getNode()))
+ return DAG.getTargetConstant(0, dl, VT);
- if (ISD::isBuildVectorAllOnes(Op.getNode())) {
- SDValue Cst = DAG.getTargetConstant(1, dl, MVT::i1);
- SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
- }
+ if (ISD::isBuildVectorAllOnes(Op.getNode()))
+ return DAG.getTargetConstant(1, dl, VT);
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
@@ -5864,7 +6053,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
int SplatIdx = -1;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
- if (In.getOpcode() == ISD::UNDEF)
+ if (In.isUndef())
continue;
if (!isa<ConstantSDNode>(In))
NonConstIdx.push_back(idx);
@@ -5872,7 +6061,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
HasConstElts = true;
}
- if (SplatIdx == -1)
+ if (SplatIdx < 0)
SplatIdx = idx;
else if (In != Op.getOperand(SplatIdx))
IsSplat = false;
@@ -5903,7 +6092,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(0, dl));
}
- for (unsigned i = 0; i < NonConstIdx.size(); ++i) {
+ for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
unsigned InsertIdx = NonConstIdx[i];
DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
Op.getOperand(InsertIdx),
@@ -5948,7 +6137,7 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
SDValue Op = N->getOperand(i + BaseIdx);
// Skip UNDEFs.
- if (Op->getOpcode() == ISD::UNDEF) {
+ if (Op->isUndef()) {
// Update the expected vector extract index.
if (i * 2 == NumElts)
ExpectedVExtractIdx = BaseIdx;
@@ -5978,13 +6167,13 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
if (i * 2 < NumElts) {
- if (V0.getOpcode() == ISD::UNDEF) {
+ if (V0.isUndef()) {
V0 = Op0.getOperand(0);
if (V0.getValueType() != VT)
return false;
}
} else {
- if (V1.getOpcode() == ISD::UNDEF) {
+ if (V1.isUndef()) {
V1 = Op0.getOperand(0);
if (V1.getValueType() != VT)
return false;
@@ -6041,37 +6230,35 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
/// the upper 128-bits of the result.
static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
- SDLoc DL, SelectionDAG &DAG,
+ const SDLoc &DL, SelectionDAG &DAG,
unsigned X86Opcode, bool Mode,
bool isUndefLO, bool isUndefHI) {
- EVT VT = V0.getValueType();
- assert(VT.is256BitVector() && VT == V1.getValueType() &&
+ MVT VT = V0.getSimpleValueType();
+ assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
"Invalid nodes in input!");
unsigned NumElts = VT.getVectorNumElements();
- SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
- SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
- SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
- SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
- EVT NewVT = V0_LO.getValueType();
+ SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
+ SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
+ SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
+ SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
+ MVT NewVT = V0_LO.getSimpleValueType();
SDValue LO = DAG.getUNDEF(NewVT);
SDValue HI = DAG.getUNDEF(NewVT);
if (Mode) {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
- if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
+ if (!isUndefLO && !V0->isUndef())
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
- if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
+ if (!isUndefHI && !V1->isUndef())
HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
} else {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
- if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
- V1_LO->getOpcode() != ISD::UNDEF))
+ if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
- if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
- V1_HI->getOpcode() != ISD::UNDEF))
+ if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
}
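For reference, the 128-bit horizontal op being expanded to has these semantics (a scalar C++ model of HADDPS; the 256-bit expansion above simply applies it to the extracted 128-bit halves):

    #include <cassert>

    // hadd(<a0 a1 a2 a3>, <b0 b1 b2 b3>) = <a0+a1, a2+a3, b0+b1, b2+b3>
    static void hadd4(const float *A, const float *B, float *R) {
      R[0] = A[0] + A[1]; R[1] = A[2] + A[3];
      R[2] = B[0] + B[1]; R[3] = B[2] + B[3];
    }

    int main() {
      float A[4] = {1, 2, 3, 4}, B[4] = {5, 6, 7, 8}, R[4];
      hadd4(A, B, R);
      assert(R[0] == 3 && R[1] == 7 && R[2] == 11 && R[3] == 15);
    }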
@@ -6081,10 +6268,10 @@ static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
/// Try to fold a build_vector that performs an 'addsub' to an X86ISD::ADDSUB
/// node.
static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
MVT VT = BV->getSimpleValueType(0);
- if ((!Subtarget->hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
- (!Subtarget->hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
+ if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
+ (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
return SDValue();
SDLoc DL(BV);
@@ -6142,12 +6329,12 @@ static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
SubFound = true;
// Update InVec0 and InVec1.
- if (InVec0.getOpcode() == ISD::UNDEF) {
+ if (InVec0.isUndef()) {
InVec0 = Op0.getOperand(0);
if (InVec0.getSimpleValueType() != VT)
return SDValue();
}
- if (InVec1.getOpcode() == ISD::UNDEF) {
+ if (InVec1.isUndef()) {
InVec1 = Op1.getOperand(0);
if (InVec1.getSimpleValueType() != VT)
return SDValue();
@@ -6174,8 +6361,7 @@ static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
}
// Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
- if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
- InVec1.getOpcode() != ISD::UNDEF)
+ if (AddFound && SubFound && !InVec0.isUndef() && !InVec1.isUndef())
return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
return SDValue();
@@ -6183,7 +6369,7 @@ static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
/// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = BV->getSimpleValueType(0);
unsigned NumElts = VT.getVectorNumElements();
@@ -6193,11 +6379,11 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
// Count the number of UNDEF operands in the build_vector in input.
for (unsigned i = 0, e = Half; i != e; ++i)
- if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
+ if (BV->getOperand(i)->isUndef())
NumUndefsLO++;
for (unsigned i = Half, e = NumElts; i != e; ++i)
- if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
+ if (BV->getOperand(i)->isUndef())
NumUndefsHI++;
// Early exit if this is either a build_vector of all UNDEFs or all the
@@ -6207,14 +6393,14 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
SDLoc DL(BV);
SDValue InVec0, InVec1;
- if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
+ if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) {
// Try to match an SSE3 float HADD/HSUB.
if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
- } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
+ } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget.hasSSSE3()) {
// Try to match an SSSE3 integer HADD/HSUB.
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
@@ -6223,7 +6409,7 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
}
- if (!Subtarget->hasAVX())
+ if (!Subtarget.hasAVX())
return SDValue();
if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
@@ -6232,18 +6418,14 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
SDValue InVec2, InVec3;
if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
- ((InVec0.getOpcode() == ISD::UNDEF ||
- InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
- ((InVec1.getOpcode() == ISD::UNDEF ||
- InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
+ ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
- ((InVec0.getOpcode() == ISD::UNDEF ||
- InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
- ((InVec1.getOpcode() == ISD::UNDEF ||
- InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
+ ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
} else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
// Try to match an AVX2 horizontal add/sub of signed integers.
@@ -6253,17 +6435,13 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
- ((InVec0.getOpcode() == ISD::UNDEF ||
- InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
- ((InVec1.getOpcode() == ISD::UNDEF ||
- InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
+ ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
X86Opcode = X86ISD::HADD;
else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
- ((InVec0.getOpcode() == ISD::UNDEF ||
- InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
- ((InVec1.getOpcode() == ISD::UNDEF ||
- InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
+ ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
X86Opcode = X86ISD::HSUB;
else
CanFold = false;
@@ -6271,7 +6449,7 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
if (CanFold) {
// Fold this build_vector into a single horizontal add/sub.
// Do this only if the target has AVX2.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
// Do not try to expand this build_vector into a pair of horizontal
@@ -6289,7 +6467,7 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
}
if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
- VT == MVT::v16i16) && Subtarget->hasAVX()) {
+ VT == MVT::v16i16) && Subtarget.hasAVX()) {
unsigned X86Opcode;
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
X86Opcode = X86ISD::HADD;
@@ -6318,39 +6496,101 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
return SDValue();
}
-SDValue
-X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
-
+/// If a BUILD_VECTOR's source elements all apply the same bit operation and
+/// one of their operands is constant, lower to a pair of BUILD_VECTORs and
+/// just apply the bit operation to the vectors.
+/// NOTE: It's not in our interest to start making a general purpose vectorizer
+/// from this, but enough scalar bit operations are created by the later
+/// legalization + scalarization stages to need basic support.
+static SDValue lowerBuildVectorToBitOp(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
- MVT ExtVT = VT.getVectorElementType();
- unsigned NumElems = Op.getNumOperands();
+ unsigned NumElems = VT.getVectorNumElements();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- // Generate vectors for predicate vectors.
- if (VT.getVectorElementType() == MVT::i1 && Subtarget->hasAVX512())
- return LowerBUILD_VECTORvXi1(Op, DAG);
+ // Check that all elements have the same opcode.
+ // TODO: Should we allow UNDEFs, and if so, how many?
+ unsigned Opcode = Op.getOperand(0).getOpcode();
+ for (unsigned i = 1; i < NumElems; ++i)
+ if (Opcode != Op.getOperand(i).getOpcode())
+ return SDValue();
- // Vectors containing all zeros can be matched by pxor and xorps later
+ // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
+ switch (Opcode) {
+ default:
+ return SDValue();
+ case ISD::AND:
+ case ISD::XOR:
+ case ISD::OR:
+ if (!TLI.isOperationLegalOrPromote(Opcode, VT))
+ return SDValue();
+ break;
+ }
+
+ SmallVector<SDValue, 4> LHSElts, RHSElts;
+ for (SDValue Elt : Op->ops()) {
+ SDValue LHS = Elt.getOperand(0);
+ SDValue RHS = Elt.getOperand(1);
+
+ // We expect the canonicalized RHS operand to be the constant.
+ if (!isa<ConstantSDNode>(RHS))
+ return SDValue();
+ LHSElts.push_back(LHS);
+ RHSElts.push_back(RHS);
+ }
+
+ SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
+ SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
+ return DAG.getNode(Opcode, DL, VT, LHS, RHS);
+}
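The payoff of the rewrite, sketched with SSE2 intrinsics rather than SelectionDAG (the function and the constants are invented for illustration): the per-element scalar ops collapse into one vector op whose constant build_vector can be folded into a single load.

    #include <immintrin.h>

    // Four scalar (x[i] & C[i]) results feeding a build_vector become one PAND.
    static inline __m128i fusedAnd(__m128i X) {
      const __m128i C = _mm_set_epi32(8, 4, 2, 1); // gathered RHS constants
      return _mm_and_si128(X, C);
    }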
+
+/// Create a vector constant without a load. SSE/AVX provide the bare minimum
+/// functionality to do this, so the constant must be all zeros, all ones, or
+/// some derivation that is cheap to calculate.
+static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ SDLoc DL(Op);
+ MVT VT = Op.getSimpleValueType();
+
+ // Vectors containing all zeros can be matched by pxor and xorps.
if (ISD::isBuildVectorAllZeros(Op.getNode())) {
// Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
// and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
return Op;
- return getZeroVector(VT, Subtarget, DAG, dl);
+ return getZeroVector(VT, Subtarget, DAG, DL);
}
// Vectors containing all ones can be matched by pcmpeqd on 128-bit width
// vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
// vpcmpeqd on 256-bit vectors.
- if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
- if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
+ if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
+ if (VT == MVT::v4i32 || VT == MVT::v16i32 ||
+ (VT == MVT::v8i32 && Subtarget.hasInt256()))
return Op;
- if (!VT.is512BitVector())
- return getOnesVector(VT, Subtarget, DAG, dl);
+ return getOnesVector(VT, Subtarget, DAG, DL);
}
+ return SDValue();
+}
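The two cheap constants this relies on, shown as SSE2 intrinsics (illustrative only; compilers emit the register-self-operand idioms, so neither needs a constant-pool load):

    #include <immintrin.h>

    static inline __m128i allZeros() { return _mm_setzero_si128(); } // pxor x,x
    static inline __m128i allOnes() {
      __m128i Z = _mm_setzero_si128();
      return _mm_cmpeq_epi32(Z, Z); // pcmpeqd x,x: every lane compares equal
    }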
+
+SDValue
+X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+
+ MVT VT = Op.getSimpleValueType();
+ MVT ExtVT = VT.getVectorElementType();
+ unsigned NumElems = Op.getNumOperands();
+
+ // Generate vectors for predicate vectors.
+ if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
+ return LowerBUILD_VECTORvXi1(Op, DAG);
+
+ if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
+ return VectorConstant;
+
BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
if (SDValue AddSub = LowerToAddSub(BV, Subtarget, DAG))
return AddSub;
@@ -6358,6 +6598,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return HorizontalOp;
if (SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG))
return Broadcast;
+ if (SDValue BitOp = lowerBuildVectorToBitOp(Op, DAG))
+ return BitOp;
unsigned EVTBits = ExtVT.getSizeInBits();
@@ -6368,7 +6610,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
SmallSet<SDValue, 8> Values;
for (unsigned i = 0; i < NumElems; ++i) {
SDValue Elt = Op.getOperand(i);
- if (Elt.getOpcode() == ISD::UNDEF)
+ if (Elt.isUndef())
continue;
Values.insert(Elt);
if (Elt.getOpcode() != ISD::Constant &&
@@ -6397,7 +6639,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// insertion that way. Only do this if the value is non-constant or if the
// value is a constant being inserted into element 0. It is cheaper to do
// a constant pool load than it is to do a movd + shuffle.
- if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
+ if (ExtVT == MVT::i64 && !Subtarget.is64Bit() &&
(!IsAllConstants || Idx == 0)) {
if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
// Handle SSE only.
@@ -6422,7 +6664,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
- (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
+ (ExtVT == MVT::i64 && Subtarget.is64Bit())) {
if (VT.is512BitVector()) {
SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
@@ -6439,16 +6681,17 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// it to i32 first.
if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
- if (VT.is256BitVector()) {
- if (Subtarget->hasAVX()) {
- Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v8i32, Item);
+ if (VT.getSizeInBits() >= 256) {
+ MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
+ if (Subtarget.hasAVX()) {
+ Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
} else {
// Without AVX, we need to extend to a 128-bit vector and then
// insert into the 256-bit vector.
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
- SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
- Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
+ SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl);
+ Item = insert128BitVector(ZeroVec, Item, 0, DAG, dl);
}
} else {
assert(VT.is128BitVector() && "Expected an SSE value type!");
@@ -6504,28 +6747,30 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (IsAllConstants)
return SDValue();
- // For AVX-length vectors, see if we can use a vector load to get all of the
- // elements, otherwise build the individual 128-bit pieces and use
+ // See if we can use a vector load to get all of the elements.
+ if (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) {
+ SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
+ if (SDValue LD = EltsFromConsecutiveLoads(VT, Ops, dl, DAG, false))
+ return LD;
+ }
+
+ // For AVX-length vectors, build the individual 128-bit pieces and use
// shuffles to put them in place.
if (VT.is256BitVector() || VT.is512BitVector()) {
- SmallVector<SDValue, 64> V(Op->op_begin(), Op->op_begin() + NumElems);
-
- // Check for a build vector of consecutive loads.
- if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
- return LD;
+ SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
// Build both the lower and upper subvector.
- SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
- makeArrayRef(&V[0], NumElems/2));
- SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
- makeArrayRef(&V[NumElems / 2], NumElems/2));
+ SDValue Lower =
+ DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElems / 2));
+ SDValue Upper = DAG.getBuildVector(
+ HVT, dl, makeArrayRef(&Ops[NumElems / 2], NumElems / 2));
// Recreate the wider vector with the lower and upper part.
if (VT.is256BitVector())
- return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
- return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
+ return concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
+ return concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
}
// Let legalizer expand 2-wide build_vectors.
@@ -6557,30 +6802,30 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return V;
// If element VT is == 32 bits, turn it into a number of shuffles.
- SmallVector<SDValue, 8> V(NumElems);
if (NumElems == 4 && NumZero > 0) {
+ SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < 4; ++i) {
bool isZero = !(NonZeros & (1ULL << i));
if (isZero)
- V[i] = getZeroVector(VT, Subtarget, DAG, dl);
+ Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
else
- V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
+ Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
}
for (unsigned i = 0; i < 2; ++i) {
switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
default: break;
case 0:
- V[i] = V[i*2]; // Must be a zero vector.
+ Ops[i] = Ops[i*2]; // Must be a zero vector.
break;
case 1:
- V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
+ Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
break;
case 2:
- V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
+ Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
break;
case 3:
- V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
+ Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
break;
}
}
@@ -6593,32 +6838,24 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
static_cast<int>(Reverse2 ? NumElems : NumElems+1)
};
- return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
+ return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
}
if (Values.size() > 1 && VT.is128BitVector()) {
- // Check for a build vector of consecutive loads.
- for (unsigned i = 0; i < NumElems; ++i)
- V[i] = Op.getOperand(i);
-
- // Check for elements which are consecutive loads.
- if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
- return LD;
-
// Check for a build vector from mostly shuffle plus few inserting.
if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
return Sh;
// For SSE 4.1, use insertps to put the high elements into the low element.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
SDValue Result;
- if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
+ if (!Op.getOperand(0).isUndef())
Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
else
Result = DAG.getUNDEF(VT);
for (unsigned i = 1; i < NumElems; ++i) {
- if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
+ if (Op.getOperand(i).isUndef()) continue;
Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
}
@@ -6628,11 +6865,12 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// Otherwise, expand into a number of unpckl*, start by extending each of
// our (non-undef) elements to the full vector width with the element in the
// bottom slot of the vector (which generates no code for SSE).
+ SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
- if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
- V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
+ if (!Op.getOperand(i).isUndef())
+ Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
else
- V[i] = DAG.getUNDEF(VT);
+ Ops[i] = DAG.getUNDEF(VT);
}
// Next, we iteratively mix elements, e.g. for v4f32:
@@ -6642,20 +6880,20 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
unsigned EltStride = NumElems >> 1;
while (EltStride != 0) {
for (unsigned i = 0; i < EltStride; ++i) {
- // If V[i+EltStride] is undef and this is the first round of mixing,
+ // If Ops[i+EltStride] is undef and this is the first round of mixing,
// then it is safe to just drop this shuffle: Ops[i] is already in the
// right place, the one element (since it's the first round) being
// inserted as undef can be dropped. This isn't safe for successive
// rounds because they will permute elements within both vectors.
- if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
+ if (Ops[i+EltStride].isUndef() &&
EltStride == NumElems/2)
continue;
- V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
+ Ops[i] = getUnpackl(DAG, dl, VT, Ops[i], Ops[i + EltStride]);
}
EltStride >>= 1;
}
- return V[0];
+ return Ops[0];
}
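The mixing loop is easiest to see on four scalars a, b, c, d; a standalone model where each string lists a vector's defined low lanes:

    #include <cassert>
    #include <string>

    // unpackl interleaves the low elements of two vectors: a0 b0 a1 b1 ...
    static std::string unpackl(const std::string &A, const std::string &B) {
      std::string R;
      for (std::string::size_type i = 0; i < A.size(); ++i) {
        R += A[i];
        R += B[i];
      }
      return R;
    }

    int main() {
      std::string Ops[4] = {"a", "b", "c", "d"}; // each scalar in lane 0
      for (unsigned Stride = 2; Stride != 0; Stride >>= 1) // the EltStride loop
        for (unsigned i = 0; i < Stride; ++i)
          Ops[i] = unpackl(Ops[i], Ops[i + Stride]);
      assert(Ops[0] == "abcd"); // round 1: <a,c>, <b,d>; round 2: <a,b,c,d>
    }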
return SDValue();
}
@@ -6673,21 +6911,23 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
SDValue V2 = Op.getOperand(1);
unsigned NumElems = ResVT.getVectorNumElements();
if (ResVT.is256BitVector())
- return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
+ return concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
if (Op.getNumOperands() == 4) {
MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
ResVT.getVectorNumElements()/2);
SDValue V3 = Op.getOperand(2);
SDValue V4 = Op.getOperand(3);
- return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
- Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
+ return concat256BitVectors(
+ concat128BitVectors(V1, V2, HalfVT, NumElems / 2, DAG, dl),
+ concat128BitVectors(V3, V4, HalfVT, NumElems / 2, DAG, dl), ResVT,
+ NumElems, DAG, dl);
}
- return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
+ return concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}
static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG & DAG) {
SDLoc dl(Op);
MVT ResVT = Op.getSimpleValueType();
@@ -6764,7 +7004,7 @@ static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
}
static SDValue LowerCONCAT_VECTORS(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (VT.getVectorElementType() == MVT::i1)
@@ -6800,24 +7040,11 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op,
/// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
/// in-place shuffle are 'no-op's.
static bool isNoopShuffleMask(ArrayRef<int> Mask) {
- for (int i = 0, Size = Mask.size(); i < Size; ++i)
- if (Mask[i] != -1 && Mask[i] != i)
- return false;
- return true;
-}
-
-/// \brief Helper function to classify a mask as a single-input mask.
-///
-/// This isn't a generic single-input test because in the vector shuffle
-/// lowering we canonicalize single inputs to be the first input operand. This
-/// means we can more quickly test for a single input by only checking whether
-/// an input from the second operand exists. We also assume that the size of
-/// mask corresponds to the size of the input vectors which isn't true in the
-/// fully general case.
-static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
- for (int M : Mask)
- if (M >= (int)Mask.size())
+ for (int i = 0, Size = Mask.size(); i < Size; ++i) {
+ assert(Mask[i] >= -1 && "Out of bound mask element!");
+ if (Mask[i] >= 0 && Mask[i] != i)
return false;
+ }
return true;
}
@@ -6835,22 +7062,22 @@ static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
return false;
}
-/// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
+/// \brief Test whether a shuffle mask is equivalent within each sub-lane.
///
/// This checks a shuffle mask to see if it is performing the same
-/// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
+/// lane-relative shuffle in each sub-lane. This trivially implies
/// that it is also not lane-crossing. It may however involve a blend from the
/// same lane of a second vector.
///
/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
/// non-trivial to compute in the face of undef lanes. The representation is
-/// *not* suitable for use with existing 128-bit shuffles as it will contain
-/// entries from both V1 and V2 inputs to the wider mask.
-static bool
-is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
- SmallVectorImpl<int> &RepeatedMask) {
- int LaneSize = 128 / VT.getScalarSizeInBits();
- RepeatedMask.resize(LaneSize, -1);
+/// suitable for use with existing 128-bit shuffles as entries from the second
+/// vector have been remapped to [LaneSize, 2*LaneSize).
+static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
+ ArrayRef<int> Mask,
+ SmallVectorImpl<int> &RepeatedMask) {
+ int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
+ RepeatedMask.assign(LaneSize, -1);
int Size = Mask.size();
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
@@ -6860,17 +7087,55 @@ is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
return false;
// Ok, handle the in-lane shuffles by detecting if and when they repeat.
- if (RepeatedMask[i % LaneSize] == -1)
+ // Adjust second vector indices to start at LaneSize instead of Size.
+ int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
+ : Mask[i] % LaneSize + LaneSize;
+ if (RepeatedMask[i % LaneSize] < 0)
// This is the first non-undef entry in this slot of a 128-bit lane.
- RepeatedMask[i % LaneSize] =
- Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
- else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
+ RepeatedMask[i % LaneSize] = LocalM;
+ else if (RepeatedMask[i % LaneSize] != LocalM)
// Found a mismatch with the repeated mask.
return false;
}
return true;
}
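A standalone re-implementation to show the remapping (assuming the usual lane-crossing rejection in the part of the loop elided between hunks): for v8i32 with 128-bit lanes (LaneSize = 4), second-input indices land in [4, 8).

    #include <cassert>
    #include <vector>

    static bool isRepeated(int LaneSize, const std::vector<int> &Mask,
                           std::vector<int> &Repeated) {
      int Size = Mask.size();
      Repeated.assign(LaneSize, -1);
      for (int i = 0; i < Size; ++i) {
        if (Mask[i] < 0)
          continue;
        if ((Mask[i] % Size) / LaneSize != i / LaneSize)
          return false; // crosses a lane
        int LocalM = Mask[i] % LaneSize + (Mask[i] < Size ? 0 : LaneSize);
        if (Repeated[i % LaneSize] < 0)
          Repeated[i % LaneSize] = LocalM;
        else if (Repeated[i % LaneSize] != LocalM)
          return false;
      }
      return true;
    }

    int main() {
      std::vector<int> R;
      assert(isRepeated(4, {0, 9, 2, 11, 4, 13, 6, 15}, R));
      assert((R == std::vector<int>{0, 5, 2, 7})); // 9, 11 -> lane-local 5, 7
    }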
+/// Test whether a shuffle mask is equivalent within each 128-bit lane.
+static bool
+is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
+ SmallVectorImpl<int> &RepeatedMask) {
+ return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
+}
+
+/// Test whether a shuffle mask is equivalent within each 256-bit lane.
+static bool
+is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
+ SmallVectorImpl<int> &RepeatedMask) {
+ return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
+}
+
+static void scaleShuffleMask(int Scale, ArrayRef<int> Mask,
+ SmallVectorImpl<int> &ScaledMask) {
+ assert(0 < Scale && "Unexpected scaling factor");
+ int NumElts = Mask.size();
+ ScaledMask.assign(NumElts * Scale, -1);
+
+ for (int i = 0; i != NumElts; ++i) {
+ int M = Mask[i];
+
+ // Repeat sentinel values in every mask element.
+ if (M < 0) {
+ for (int s = 0; s != Scale; ++s)
+ ScaledMask[(Scale * i) + s] = M;
+ continue;
+ }
+
+ // Scale mask element and increment across each mask element.
+ for (int s = 0; s != Scale; ++s)
+ ScaledMask[(Scale * i) + s] = (Scale * M) + s;
+ }
+}
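A worked example of the scaling (standalone copy of the logic above): widening a v4 mask by Scale = 2, sentinels repeat verbatim and index M becomes {2M, 2M+1}.

    #include <cassert>
    #include <vector>

    static void scaleMask(int Scale, const std::vector<int> &Mask,
                          std::vector<int> &Scaled) {
      Scaled.assign(Mask.size() * Scale, -1);
      for (int i = 0, e = Mask.size(); i != e; ++i)
        for (int s = 0; s != Scale; ++s)
          Scaled[Scale * i + s] = Mask[i] < 0 ? Mask[i] : Scale * Mask[i] + s;
    }

    int main() {
      std::vector<int> Scaled;
      scaleMask(2, {0, -1, 3, 1}, Scaled);
      assert((Scaled == std::vector<int>{0, 1, -1, -1, 6, 7, 2, 3}));
    }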
+
/// \brief Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
@@ -6893,8 +7158,9 @@ static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
- for (int i = 0; i < Size; ++i)
- if (Mask[i] != -1 && Mask[i] != ExpectedMask[i]) {
+ for (int i = 0; i < Size; ++i) {
+ assert(Mask[i] >= -1 && "Out of bound mask element!");
+ if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
if (!MaskBV || !ExpectedBV ||
@@ -6902,6 +7168,32 @@ static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
ExpectedBV->getOperand(ExpectedMask[i] % Size))
return false;
}
+}
+
+ return true;
+}
+
+/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
+///
+/// The masks must be exactly the same width.
+///
+/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
+/// value in ExpectedMask is always accepted. Otherwise the indices must match.
+///
+/// SM_SentinelZero is accepted as a valid negative index but must match in
+/// both masks.
+static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
+ ArrayRef<int> ExpectedMask) {
+ int Size = Mask.size();
+ if (Size != (int)ExpectedMask.size())
+ return false;
+
+ for (int i = 0; i < Size; ++i)
+ if (Mask[i] == SM_SentinelUndef)
+ continue;
+ else if (Mask[i] < 0 && Mask[i] != SM_SentinelZero)
+ return false;
+ else if (Mask[i] != ExpectedMask[i])
+ return false;
return true;
}
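Sentinel behaviour in a nutshell (a standalone sketch; SM_SentinelZero's value of -2 is assumed from the shuffle-decode convention):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum { SM_SentinelUndef = -1, SM_SentinelZero = -2 }; // assumed values

    static bool equiv(const std::vector<int> &M, const std::vector<int> &E) {
      if (M.size() != E.size())
        return false;
      for (std::size_t i = 0; i < M.size(); ++i) {
        if (M[i] == SM_SentinelUndef)
          continue; // undef matches anything
        if (M[i] < 0 && M[i] != SM_SentinelZero)
          return false;
        if (M[i] != E[i])
          return false; // indices and zeros must match exactly
      }
      return true;
    }

    int main() {
      assert(equiv({SM_SentinelUndef, SM_SentinelZero, 2, 3},
                   {0, SM_SentinelZero, 2, 3}));
      assert(!equiv({SM_SentinelZero, 1}, {0, 1}));
    }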
@@ -6914,8 +7206,7 @@ static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
-static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
- SelectionDAG &DAG) {
+static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
@@ -6923,11 +7214,16 @@ static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
unsigned Imm = 0;
- Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
- Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
- Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
- Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
- return DAG.getConstant(Imm, DL, MVT::i8);
+ Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
+ Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
+ Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
+ Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
+ return Imm;
+}
+
+static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
+ SelectionDAG &DAG) {
+ return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
}
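The immediate encoding, worked through for mask {3, 1, 2, 0}: two bits per result lane, lane 0 in the low bits, with undef lanes defaulting to identity.

    #include <cassert>

    static unsigned imm4(const int M[4]) {
      unsigned Imm = 0;
      for (int i = 0; i < 4; ++i)
        Imm |= unsigned(M[i] < 0 ? i : M[i]) << (2 * i);
      return Imm;
    }

    int main() {
      const int M[4] = {3, 1, 2, 0};
      assert(imm4(M) == 0x27); // 00 10 01 11 -> PSHUFD/SHUFPS-style imm8
    }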
/// \brief Compute whether each element of a shuffle is zeroable.
@@ -6941,15 +7237,16 @@ static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
SDValue V1, SDValue V2) {
SmallBitVector Zeroable(Mask.size(), false);
-
- while (V1.getOpcode() == ISD::BITCAST)
- V1 = V1->getOperand(0);
- while (V2.getOpcode() == ISD::BITCAST)
- V2 = V2->getOperand(0);
+ V1 = peekThroughBitcasts(V1);
+ V2 = peekThroughBitcasts(V2);
bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
+ int VectorSizeInBits = V1.getValueType().getSizeInBits();
+ int ScalarSizeInBits = VectorSizeInBits / Mask.size();
+ assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
+
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
int M = Mask[i];
// Handle the easy cases.
@@ -6958,38 +7255,119 @@ static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
continue;
}
- // If this is an index into a build_vector node (which has the same number
- // of elements), dig out the input value and use it.
+ // Determine shuffle input and normalize the mask.
SDValue V = M < Size ? V1 : V2;
- if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
+ M %= Size;
+
+ // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
+ if (V.getOpcode() != ISD::BUILD_VECTOR)
continue;
- SDValue Input = V.getOperand(M % Size);
- // The UNDEF opcode check really should be dead code here, but not quite
- // worth asserting on (it isn't invalid, just unexpected).
- if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
- Zeroable[i] = true;
+ // If the BUILD_VECTOR has fewer elements than the mask, then the bitcasted
+ // portion of the (larger) source element must be UNDEF/ZERO.
+ if ((Size % V.getNumOperands()) == 0) {
+ int Scale = Size / V->getNumOperands();
+ SDValue Op = V.getOperand(M / Scale);
+ if (Op.isUndef() || X86::isZeroNode(Op))
+ Zeroable[i] = true;
+ else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
+ APInt Val = Cst->getAPIntValue();
+ Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+ Val = Val.getLoBits(ScalarSizeInBits);
+ Zeroable[i] = (Val == 0);
+ } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
+ APInt Val = Cst->getValueAPF().bitcastToAPInt();
+ Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+ Val = Val.getLoBits(ScalarSizeInBits);
+ Zeroable[i] = (Val == 0);
+ }
+ continue;
+ }
+
+ // If the BUILD_VECTOR has more elements than the mask, then all the (smaller)
+ // source elements must be UNDEF or ZERO.
+ if ((V.getNumOperands() % Size) == 0) {
+ int Scale = V->getNumOperands() / Size;
+ bool AllZeroable = true;
+ for (int j = 0; j < Scale; ++j) {
+ SDValue Op = V.getOperand((M * Scale) + j);
+ AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
+ }
+ Zeroable[i] = AllZeroable;
+ continue;
+ }
}
return Zeroable;
}
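The new bitcast-aware scaling, worked on a concrete case with uint64_t standing in for APInt: a v4i32 mask over a BUILD_VECTOR of two i64 constants reads 32-bit halves, low half first (little-endian).

    #include <cassert>
    #include <cstdint>

    // Size = 4 mask elements over 2 operands -> Scale = 2; mask element M
    // reads 32 bits starting at bit (M % Scale) * 32 of operand M / Scale.
    static bool zeroable(const uint64_t Ops[2], int M) {
      uint64_t Val = Ops[M / 2] >> ((M % 2) * 32);
      return uint32_t(Val) == 0;
    }

    int main() {
      uint64_t Ops[2] = {0x00000000FFFFFFFFull, 0};
      assert(!zeroable(Ops, 0)); // low half of operand 0 is all-ones
      assert(zeroable(Ops, 1));  // high half of operand 0 is zero
      assert(zeroable(Ops, 2) && zeroable(Ops, 3));
    }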
+/// Try to lower a shuffle with a single PSHUFB of V1.
+/// This is only possible if V2 is unused (at all, or only for zero elements).
+static SDValue lowerVectorShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
+ ArrayRef<int> Mask, SDValue V1,
+ SDValue V2,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ int Size = Mask.size();
+ int LaneSize = 128 / VT.getScalarSizeInBits();
+ const int NumBytes = VT.getSizeInBits() / 8;
+ const int NumEltBytes = VT.getScalarSizeInBits() / 8;
+
+ assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
+ (Subtarget.hasAVX2() && VT.is256BitVector()) ||
+ (Subtarget.hasBWI() && VT.is512BitVector()));
+
+ SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
+
+ SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
+ // Sign bit set in i8 mask means zero element.
+ SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
+
+ for (int i = 0; i < NumBytes; ++i) {
+ int M = Mask[i / NumEltBytes];
+ if (M < 0) {
+ PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
+ continue;
+ }
+ if (Zeroable[i / NumEltBytes]) {
+ PSHUFBMask[i] = ZeroMask;
+ continue;
+ }
+ // Only allow V1.
+ if (M >= Size)
+ return SDValue();
+
+ // PSHUFB can't cross lanes; ensure this doesn't happen.
+ if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
+ return SDValue();
+
+ M = M % LaneSize;
+ M = M * NumEltBytes + (i % NumEltBytes);
+ PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
+ }
+
+ MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
+ return DAG.getBitcast(
+ VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V1),
+ DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
+}
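Byte-mask construction for a single-input v4i32 shuffle {3, 1, 2, 0} (a standalone sketch with no zeroable lanes): each i32 mask entry expands to four byte indices, and a 0x80 byte would zero its lane instead.

    #include <cassert>

    int main() {
      const int Mask[4] = {3, 1, 2, 0};
      unsigned char PSHUFBMask[16];
      for (int i = 0; i < 16; ++i) {
        int M = Mask[i / 4];                           // NumEltBytes == 4
        PSHUFBMask[i] = (unsigned char)(M * 4 + i % 4);
      }
      assert(PSHUFBMask[0] == 12 && PSHUFBMask[4] == 4); // bytes of elts 3, 1
    }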
+
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
-static SDValue lowerVectorShuffleWithUNPCK(SDLoc DL, MVT VT, ArrayRef<int> Mask,
- SDValue V1, SDValue V2,
- SelectionDAG &DAG) {
+static SDValue lowerVectorShuffleWithUNPCK(const SDLoc &DL, MVT VT,
+ ArrayRef<int> Mask, SDValue V1,
+ SDValue V2, SelectionDAG &DAG) {
int NumElts = VT.getVectorNumElements();
int NumEltsInLane = 128 / VT.getScalarSizeInBits();
- SmallVector<int, 8> Unpckl;
- SmallVector<int, 8> Unpckh;
+ SmallVector<int, 8> Unpckl(NumElts);
+ SmallVector<int, 8> Unpckh(NumElts);
for (int i = 0; i < NumElts; ++i) {
unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
int LoPos = (i % NumEltsInLane) / 2 + LaneStart + NumElts * (i % 2);
int HiPos = LoPos + NumEltsInLane / 2;
- Unpckl.push_back(LoPos);
- Unpckh.push_back(HiPos);
+ Unpckl[i] = LoPos;
+ Unpckh[i] = HiPos;
}
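The generated masks for one 128-bit lane of v4i32 (LaneStart = 0), matching the UNPCKLPS / UNPCKHPS interleave:

    #include <cassert>

    int main() {
      int Unpckl[4], Unpckh[4];
      for (int i = 0; i < 4; ++i) {
        int LoPos = (i % 4) / 2 + 4 * (i % 2); // NumElts = NumEltsInLane = 4
        Unpckl[i] = LoPos;
        Unpckh[i] = LoPos + 2;                 // + NumEltsInLane / 2
      }
      // Unpckl == {0, 4, 1, 5}, Unpckh == {2, 6, 3, 7}
      assert(Unpckl[1] == 4 && Unpckh[0] == 2 && Unpckh[3] == 7);
    }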
if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
@@ -7013,7 +7391,7 @@ static SDValue lowerVectorShuffleWithUNPCK(SDLoc DL, MVT VT, ArrayRef<int> Mask,
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
-static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
MVT EltVT = VT.getVectorElementType();
@@ -7044,7 +7422,7 @@ static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
if (!V)
return SDValue(); // No non-zeroable elements!
- SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
+ SDValue VMask = DAG.getBuildVector(VT, DL, VMaskOps);
V = DAG.getNode(VT.isFloatingPoint()
? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
DL, VT, V, VMask);
@@ -7056,7 +7434,7 @@ static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
/// This is used as a fallback approach when first class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
-static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(VT.isInteger() && "Only supports integer vector types!");
@@ -7067,12 +7445,12 @@ static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
EltVT);
SmallVector<SDValue, 16> MaskOps;
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
- if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
+ if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
return SDValue(); // Shuffled input!
MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
}
- SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
+ SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
// We have to cast V2 around.
MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
@@ -7088,9 +7466,9 @@ static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is a blend, or convertible into a blend with zero.
-static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Original,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
@@ -7153,13 +7531,13 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
case MVT::v4i64:
case MVT::v8i32:
- assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
+ assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
// FALLTHROUGH
case MVT::v2i64:
case MVT::v4i32:
// If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
// that instruction.
- if (Subtarget->hasAVX2()) {
+ if (Subtarget.hasAVX2()) {
// Scale the blend by the number of 32-bit dwords per element.
int Scale = VT.getScalarSizeInBits() / 32;
BlendMask = ScaleBlendMask(BlendMask, Mask.size(), Scale);
@@ -7184,14 +7562,14 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
}
case MVT::v16i16: {
- assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
+ assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
SmallVector<int, 8> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
// We can lower these with PBLENDW which is mirrored across 128-bit lanes.
assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
BlendMask = 0;
for (int i = 0; i < 8; ++i)
- if (RepeatedMask[i] >= 16)
+ if (RepeatedMask[i] >= 8)
BlendMask |= 1u << i;
return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8));
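A minimal standalone sketch of the PBLENDW immediate computation above (illustrative only, not part of the patch; note the corrected ">= 8" test — the repeated mask has 8 lanes, so its second input starts at index 8, not 16):

    #include <array>
    #include <cstdint>

    // Bit i of the PBLENDW immediate selects the second input for lane i.
    static uint8_t blendImmFromRepeatedMask(const std::array<int, 8> &RepeatedMask) {
      uint8_t Imm = 0;
      for (int i = 0; i < 8; ++i)
        if (RepeatedMask[i] >= 8) // lane reads from the second input
          Imm |= 1u << i;
      return Imm;
    }
    // e.g. {0, 9, 2, 11, 4, 13, 6, 15} -> 0xAA (alternating V1/V2).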
@@ -7200,7 +7578,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
// FALLTHROUGH
case MVT::v16i8:
case MVT::v32i8: {
- assert((VT.is128BitVector() || Subtarget->hasAVX2()) &&
+ assert((VT.is128BitVector() || Subtarget.hasAVX2()) &&
"256-bit byte-blends require AVX2 support!");
// Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
@@ -7235,10 +7613,9 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
V1 = DAG.getBitcast(BlendVT, V1);
V2 = DAG.getBitcast(BlendVT, V2);
- return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, DL, BlendVT,
- DAG.getNode(ISD::BUILD_VECTOR, DL,
- BlendVT, VSELECTMask),
- V1, V2));
+ return DAG.getBitcast(
+ VT, DAG.getNode(ISD::VSELECT, DL, BlendVT,
+ DAG.getBuildVector(BlendVT, DL, VSELECTMask), V1, V2));
}
default:
@@ -7251,8 +7628,8 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
-static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2,
+static SDValue lowerVectorShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
// We build up the blend mask while checking whether a blend is a viable way
@@ -7266,7 +7643,7 @@ static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
- if (BlendMask[Mask[i] % Size] == -1)
+ if (BlendMask[Mask[i] % Size] < 0)
BlendMask[Mask[i] % Size] = Mask[i];
else if (BlendMask[Mask[i] % Size] != Mask[i])
return SDValue(); // Can't blend in the needed input!
@@ -7285,8 +7662,8 @@ static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends.
-static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
- SDValue V1,
+static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(const SDLoc &DL,
+ MVT VT, SDValue V1,
SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
@@ -7335,10 +7712,10 @@ static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
-static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2,
+static SDValue lowerVectorShuffleAsByteRotate(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
ArrayRef<int> Mask,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
@@ -7357,9 +7734,8 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
SDValue Lo, Hi;
for (int l = 0; l < NumElts; l += NumLaneElts) {
for (int i = 0; i < NumLaneElts; ++i) {
- if (Mask[l + i] == -1)
+ if (Mask[l + i] < 0)
continue;
- assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
// Get the mod-Size index and lane correct it.
int LaneIdx = (Mask[l + i] % NumElts) - l;
@@ -7411,19 +7787,22 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
else if (!Hi)
Hi = Lo;
+ // Cast the inputs to i8 vector of correct length to match PALIGNR or
+ // PSLLDQ/PSRLDQ.
+ MVT ByteVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
+ Lo = DAG.getBitcast(ByteVT, Lo);
+ Hi = DAG.getBitcast(ByteVT, Hi);
+
// The actual rotate instruction rotates bytes, so we need to scale the
// rotation based on how many bytes are in the vector lane.
int Scale = 16 / NumLaneElts;
// SSSE3 targets can use the palignr instruction.
- if (Subtarget->hasSSSE3()) {
- // Cast the inputs to i8 vector of correct length to match PALIGNR.
- MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
- Lo = DAG.getBitcast(AlignVT, Lo);
- Hi = DAG.getBitcast(AlignVT, Hi);
-
+ if (Subtarget.hasSSSE3()) {
+ assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
+ "512-bit PALIGNR requires BWI instructions");
return DAG.getBitcast(
- VT, DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Lo, Hi,
+ VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
DAG.getConstant(Rotation * Scale, DL, MVT::i8)));
}
@@ -7431,21 +7810,19 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
"Rotate-based lowering only supports 128-bit lowering!");
assert(Mask.size() <= 16 &&
"Can shuffle at most 16 bytes in a 128-bit vector!");
+ assert(ByteVT == MVT::v16i8 &&
+ "SSE2 rotate lowering only needed for v16i8!");
// Default SSE2 implementation
int LoByteShift = 16 - Rotation * Scale;
int HiByteShift = Rotation * Scale;
- // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
- Lo = DAG.getBitcast(MVT::v2i64, Lo);
- Hi = DAG.getBitcast(MVT::v2i64, Hi);
-
- SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
+ SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
DAG.getConstant(LoByteShift, DL, MVT::i8));
- SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
+ SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
DAG.getConstant(HiByteShift, DL, MVT::i8));
return DAG.getBitcast(VT,
- DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
+ DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
}
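A quick model of what the code above computes (a sketch, not part of the patch): both the SSSE3 PALIGNR path and the SSE2 shift-and-OR fallback produce a byte rotate of the concatenated pair, with Lo as the more significant half.

    #include <cstdint>

    // Out = low 16 bytes of (Lo:Hi) >> (R bytes); equivalently
    // OR(VSHLDQ(Lo, 16 - R), VSRLDQ(Hi, R)) on v16i8.
    static void byteRotate(const uint8_t Lo[16], const uint8_t Hi[16], int R,
                           uint8_t Out[16]) {
      for (int i = 0; i < 16; ++i) {
        int Src = i + R; // index into the pair: bytes 0..15 = Hi, 16..31 = Lo
        Out[i] = Src < 16 ? Hi[Src] : Lo[Src - 16];
      }
    }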
/// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
@@ -7471,8 +7848,9 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
/// [ 5, 6, 7, zz, zz, zz, zz, zz]
/// [ -1, 5, 6, 7, zz, zz, zz, zz]
/// [ 1, 2, -1, -1, -1, -1, zz, zz]
-static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
@@ -7510,7 +7888,8 @@ static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
// We need to round trip through the appropriate type for the shift.
MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
- MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
+ MVT ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8)
+ : MVT::getVectorVT(ShiftSVT, Size / Scale);
assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
"Illegal integer vector type");
V = DAG.getBitcast(ShiftVT, V);
@@ -7526,7 +7905,8 @@ static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
// their width within the elements of the larger integer vector. Test each
// multiple to see if we can find a match with the moved element indices
// and that the shifted in elements are all zeroable.
- for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 128; Scale *= 2)
+ unsigned MaxWidth = (VT.is512BitVector() && !Subtarget.hasBWI() ? 64 : 128);
+ for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= MaxWidth; Scale *= 2)
for (int Shift = 1; Shift != Scale; ++Shift)
for (bool Left : {true, false})
if (CheckZeros(Shift, Scale, Left))
@@ -7539,7 +7919,7 @@ static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
}
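The new Subtarget parameter lets this routine cap the widened element size at 64 bits for 512-bit vectors without BWI. The mask pattern being matched can be sketched standalone (a hypothetical helper, assuming Zeroable already marks undef lanes as zeroable):

    #include <vector>

    // A mask is a left shift by Shift elements if the low Shift lanes are
    // zeroable and lane i (i >= Shift) reads input element i - Shift.
    // E.g. on v4i32, {zz, 0, 1, 2} is PSLLDQ by 4 bytes.
    static bool isLeftShiftByN(const std::vector<int> &Mask,
                               const std::vector<bool> &Zeroable, int Shift) {
      for (int i = 0, e = (int)Mask.size(); i != e; ++i) {
        if (i < Shift) {
          if (!Zeroable[i])
            return false; // shifted-in lanes must be zero
        } else if (Mask[i] >= 0 && Mask[i] != i - Shift) {
          return false; // defined lanes must move by exactly Shift
        }
      }
      return true;
    }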
/// \brief Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
-static SDValue lowerVectorShuffleWithSSE4A(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
@@ -7679,8 +8059,8 @@ static SDValue lowerVectorShuffleWithSSE4A(SDLoc DL, MVT VT, SDValue V1,
/// or at the start of a higher lane. All extended elements must be from
/// the same lane.
static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
- SDLoc DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
- ArrayRef<int> Mask, const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
+ ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(Scale > 1 && "Need a scale to extend.");
int EltBits = VT.getScalarSizeInBits();
int NumElements = VT.getVectorNumElements();
@@ -7713,14 +8093,20 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
// Found a valid zext mask! Try various lowering strategies based on the
// input type and available ISA extensions.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
// Not worth offsetting 128-bit vectors if scale == 2, a pattern using
// PUNPCK will catch this in a later shuffle match.
if (Offset && Scale == 2 && VT.is128BitVector())
return SDValue();
MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
NumElements / Scale);
- InputV = DAG.getNode(X86ISD::VZEXT, DL, ExtVT, ShuffleOffset(InputV));
+ InputV = ShuffleOffset(InputV);
+
+ // For 256-bit vectors, we only need the lower (128-bit) input half.
+ if (VT.is256BitVector())
+ InputV = extract128BitVector(InputV, 0, DAG, DL);
+
+ InputV = DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV);
return DAG.getBitcast(VT, InputV);
}
@@ -7752,33 +8138,33 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
// The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
// to 64-bits.
- if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget->hasSSE4A()) {
+ if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
assert(VT.is128BitVector() && "Unexpected vector width!");
int LoIdx = Offset * EltBits;
- SDValue Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
- DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
- DAG.getConstant(EltBits, DL, MVT::i8),
- DAG.getConstant(LoIdx, DL, MVT::i8)));
+ SDValue Lo = DAG.getBitcast(
+ MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
+ DAG.getConstant(EltBits, DL, MVT::i8),
+ DAG.getConstant(LoIdx, DL, MVT::i8)));
if (isUndefInRange(Mask, NumElements / 2, NumElements / 2) ||
!SafeOffset(Offset + 1))
- return DAG.getNode(ISD::BITCAST, DL, VT, Lo);
+ return DAG.getBitcast(VT, Lo);
int HiIdx = (Offset + 1) * EltBits;
- SDValue Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
- DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
- DAG.getConstant(EltBits, DL, MVT::i8),
- DAG.getConstant(HiIdx, DL, MVT::i8)));
- return DAG.getNode(ISD::BITCAST, DL, VT,
- DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
+ SDValue Hi = DAG.getBitcast(
+ MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
+ DAG.getConstant(EltBits, DL, MVT::i8),
+ DAG.getConstant(HiIdx, DL, MVT::i8)));
+ return DAG.getBitcast(VT,
+ DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
}
// If this would require more than 2 unpack instructions to expand, use
// pshufb when available. We can only use more than 2 unpack instructions
// when zero extending i8 elements which also makes it easier to use pshufb.
- if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
+ if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
assert(NumElements == 16 && "Unexpected byte vector width!");
SDValue PSHUFBMask[16];
for (int i = 0; i < 16; ++i) {
@@ -7787,10 +8173,9 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
(i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
}
InputV = DAG.getBitcast(MVT::v16i8, InputV);
- return DAG.getBitcast(VT,
- DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
- DAG.getNode(ISD::BUILD_VECTOR, DL,
- MVT::v16i8, PSHUFBMask)));
+ return DAG.getBitcast(
+ VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
+ DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
}
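The PSHUFB control built above can be reproduced standalone (a sketch mirroring the loop, with Offset assumed to be 0; 0x80 in any control byte forces that result byte to zero):

    #include <cstdint>

    static void buildZExtPshufbMask(int Scale, uint8_t Ctl[16]) {
      for (int i = 0; i < 16; ++i)
        Ctl[i] = (i % Scale == 0) ? uint8_t(i / Scale) : 0x80;
    }
    // Scale == 4 gives {0,0x80,0x80,0x80, 1,0x80,0x80,0x80, ...}: four i8
    // values zero-extended into i32 lanes.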
// If we are extending from an offset, ensure we start on a boundary that
@@ -7837,8 +8222,8 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
- SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
int Bits = VT.getSizeInBits();
@@ -7858,7 +8243,7 @@ static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
int Matches = 0;
for (int i = 0; i < NumElements; ++i) {
int M = Mask[i];
- if (M == -1)
+ if (M < 0)
continue; // Valid anywhere but doesn't tell us anything.
if (i % Scale != 0) {
// Each of the extended elements need to be zeroable.
@@ -7960,8 +8345,8 @@ static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
SelectionDAG &DAG) {
MVT VT = V.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
- while (V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
+ V = peekThroughBitcasts(V);
+
// If the bitcasts shift the element size, we can't extract an equivalent
// element from it.
MVT NewVT = V.getSimpleValueType();
@@ -7974,7 +8359,7 @@ static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
// FIXME: Add support for scalar truncation where possible.
SDValue S = V.getOperand(Idx);
if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
- return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, S);
+ return DAG.getBitcast(EltVT, S);
}
return SDValue();
@@ -7985,9 +8370,7 @@ static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
- while (V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
-
+ V = peekThroughBitcasts(V);
return ISD::isNON_EXTLoad(V.getNode());
}
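For reference, peekThroughBitcasts has the same semantics as the loops it replaces here (sketch of the behavior only):

    static SDValue peekThroughBitcastsSketch(SDValue V) {
      while (V.getOpcode() == ISD::BITCAST)
        V = V.getOperand(0);
      return V;
    }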
@@ -7996,8 +8379,8 @@ static bool isShuffleFoldableLoad(SDValue V) {
/// This is a common pattern that we have especially efficient patterns to lower
/// across all subtarget feature sets.
static SDValue lowerVectorShuffleAsElementInsertion(
- SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
MVT ExtVT = VT;
MVT EltVT = VT.getVectorElementType();
@@ -8054,7 +8437,7 @@ static SDValue lowerVectorShuffleAsElementInsertion(
// This is essentially a special case blend operation, but if we have
// general purpose blend operations, they are always faster. Bail and let
// the rest of the lowering handle these as blends.
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
return SDValue();
// Otherwise, use MOVSD or MOVSS.
@@ -8082,9 +8465,9 @@ static SDValue lowerVectorShuffleAsElementInsertion(
V2Shuffle[V2Index] = 0;
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
} else {
- V2 = DAG.getBitcast(MVT::v2i64, V2);
+ V2 = DAG.getBitcast(MVT::v16i8, V2);
V2 = DAG.getNode(
- X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
+ X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL,
DAG.getTargetLoweringInfo().getScalarShiftAmountTy(
DAG.getDataLayout(), VT)));
@@ -8094,15 +8477,15 @@ static SDValue lowerVectorShuffleAsElementInsertion(
return V2;
}
-/// \brief Try to lower broadcast of a single - truncated - integer element,
+/// Try to lower broadcast of a single - truncated - integer element,
/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
-static SDValue lowerVectorShuffleAsTruncBroadcast(SDLoc DL, MVT VT, SDValue V0,
- int BroadcastIdx,
- const X86Subtarget *Subtarget,
+static SDValue lowerVectorShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT,
+ SDValue V0, int BroadcastIdx,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(Subtarget->hasAVX2() &&
+ assert(Subtarget.hasAVX2() &&
"We can only lower integer broadcasts with AVX2!");
EVT EltVT = VT.getVectorElementType();
@@ -8153,38 +8536,57 @@ static SDValue lowerVectorShuffleAsTruncBroadcast(SDLoc DL, MVT VT, SDValue V0,
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
/// FIXME: This is very similar to LowerVectorBroadcast - can we merge them?
-static SDValue lowerVectorShuffleAsBroadcast(SDLoc DL, MVT VT, SDValue V,
+static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
ArrayRef<int> Mask,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- if (!Subtarget->hasAVX())
- return SDValue();
- if (VT.isInteger() && !Subtarget->hasAVX2())
+ if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
+ (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
+ (Subtarget.hasAVX2() && VT.isInteger())))
return SDValue();
+ // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
+ // we can only broadcast from a register with AVX2.
+ unsigned NumElts = Mask.size();
+ unsigned Opcode = VT == MVT::v2f64 ? X86ISD::MOVDDUP : X86ISD::VBROADCAST;
+ bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
+
// Check that the mask is a broadcast.
int BroadcastIdx = -1;
- for (int M : Mask)
- if (M >= 0 && BroadcastIdx == -1)
- BroadcastIdx = M;
- else if (M >= 0 && M != BroadcastIdx)
- return SDValue();
+ for (int i = 0; i != (int)NumElts; ++i) {
+ SmallVector<int, 8> BroadcastMask(NumElts, i);
+ if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
+ BroadcastIdx = i;
+ break;
+ }
+ }
+ if (BroadcastIdx < 0)
+ return SDValue();
assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
"a sorted mask where the broadcast "
"comes from V1.");
// Go up the chain of (vector) values to find a scalar load that we can
// combine with the broadcast.
+ SDValue V = V1;
for (;;) {
switch (V.getOpcode()) {
+ case ISD::BITCAST: {
+ SDValue VSrc = V.getOperand(0);
+ MVT SrcVT = VSrc.getSimpleValueType();
+ if (VT.getScalarSizeInBits() != SrcVT.getScalarSizeInBits())
+ break;
+ V = VSrc;
+ continue;
+ }
case ISD::CONCAT_VECTORS: {
int OperandSize = Mask.size() / V.getNumOperands();
V = V.getOperand(BroadcastIdx / OperandSize);
BroadcastIdx %= OperandSize;
continue;
}
-
case ISD::INSERT_SUBVECTOR: {
SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
@@ -8219,45 +8621,76 @@ static SDValue lowerVectorShuffleAsBroadcast(SDLoc DL, MVT VT, SDValue V,
MVT BroadcastVT = VT;
// Peek through any bitcast (only useful for loads).
- SDValue BC = V;
- while (BC.getOpcode() == ISD::BITCAST)
- BC = BC.getOperand(0);
+ SDValue BC = peekThroughBitcasts(V);
// Also check the simpler case, where we can directly reuse the scalar.
if (V.getOpcode() == ISD::BUILD_VECTOR ||
(V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
V = V.getOperand(BroadcastIdx);
- // If the scalar isn't a load, we can't broadcast from it in AVX1.
- // Only AVX2 has register broadcasts.
- if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
+ // If we can't broadcast from a register, check that the input is a load.
+ if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
return SDValue();
} else if (MayFoldLoad(BC) && !cast<LoadSDNode>(BC)->isVolatile()) {
// 32-bit targets need to load i64 as an f64 and then bitcast the result.
- if (!Subtarget->is64Bit() && VT.getScalarType() == MVT::i64)
+ if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
+ Opcode = (BroadcastVT.is128BitVector() ? X86ISD::MOVDDUP : Opcode);
+ }
// If we are broadcasting a load that is only used by the shuffle
// then we can reduce the vector load to the broadcasted scalar load.
LoadSDNode *Ld = cast<LoadSDNode>(BC);
SDValue BaseAddr = Ld->getOperand(1);
- EVT AddrVT = BaseAddr.getValueType();
EVT SVT = BroadcastVT.getScalarType();
unsigned Offset = BroadcastIdx * SVT.getStoreSize();
- SDValue NewAddr = DAG.getNode(
- ISD::ADD, DL, AddrVT, BaseAddr,
- DAG.getConstant(Offset, DL, AddrVT));
+ SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
DAG.getMachineFunction().getMachineMemOperand(
Ld->getMemOperand(), Offset, SVT.getStoreSize()));
- } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
- // We can't broadcast from a vector register without AVX2, and we can only
- // broadcast from the zero-element of a vector register.
+ } else if (!BroadcastFromReg) {
+ // We can't broadcast from a vector register.
return SDValue();
+ } else if (BroadcastIdx != 0) {
+ // We can only broadcast from the zero-element of a vector register,
+ // but it can be advantageous to broadcast from the zero-element of a
+ // subvector.
+ if (!VT.is256BitVector() && !VT.is512BitVector())
+ return SDValue();
+
+ // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
+ if (VT == MVT::v4f64 || VT == MVT::v4i64)
+ return SDValue();
+
+ // Only broadcast the zero-element of a 128-bit subvector.
+ unsigned EltSize = VT.getScalarSizeInBits();
+ if (((BroadcastIdx * EltSize) % 128) != 0)
+ return SDValue();
+
+ MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 128 / EltSize);
+ V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, V,
+ DAG.getIntPtrConstant(BroadcastIdx, DL));
}
- V = DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, V);
- return DAG.getBitcast(VT, V);
+ if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
+ V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
+ DAG.getBitcast(MVT::f64, V));
+
+ // Bitcast back to the same scalar type as BroadcastVT.
+ MVT SrcVT = V.getSimpleValueType();
+ if (SrcVT.getScalarType() != BroadcastVT.getScalarType()) {
+ assert(SrcVT.getScalarSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
+ "Unexpected vector element size");
+ if (SrcVT.isVector()) {
+ unsigned NumSrcElts = SrcVT.getVectorNumElements();
+ SrcVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
+ } else {
+ SrcVT = BroadcastVT.getScalarType();
+ }
+ V = DAG.getBitcast(SrcVT, V);
+ }
+
+ return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
}
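The old element-by-element scan that this replaces is still a useful mental model (sketch below; the patch instead compares against explicit splat masks with isShuffleEquivalent so that undef-heavy masks also match):

    #include <vector>

    // Returns the broadcast element index, or -1 if the mask is not a splat.
    static int findBroadcastIndex(const std::vector<int> &Mask) {
      int Idx = -1;
      for (int M : Mask) {
        if (M < 0)
          continue; // undef lane matches anything
        if (Idx < 0)
          Idx = M;
        else if (M != Idx)
          return -1; // two different sources: not a broadcast
      }
      return Idx;
    }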
// Check for whether we can use INSERTPS to perform the shuffle. We only use
@@ -8266,16 +8699,14 @@ static SDValue lowerVectorShuffleAsBroadcast(SDLoc DL, MVT VT, SDValue V,
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
-static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
- ArrayRef<int> Mask,
- SelectionDAG &DAG) {
- assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
- assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
- assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+static bool matchVectorShuffleAsInsertPS(SDValue &V1, SDValue &V2,
+ unsigned &InsertPSMask,
+ const SmallBitVector &Zeroable,
+ ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
+ assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
-
- SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
-
unsigned ZMask = 0;
int V1DstIndex = -1;
int V2DstIndex = -1;
@@ -8295,8 +8726,8 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
}
// We can only insert a single non-zeroable element.
- if (V1DstIndex != -1 || V2DstIndex != -1)
- return SDValue();
+ if (V1DstIndex >= 0 || V2DstIndex >= 0)
+ return false;
if (Mask[i] < 4) {
// V1 input out of place for insertion.
@@ -8308,13 +8739,13 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
}
// Don't bother if we have no (non-zeroable) element for insertion.
- if (V1DstIndex == -1 && V2DstIndex == -1)
- return SDValue();
+ if (V1DstIndex < 0 && V2DstIndex < 0)
+ return false;
// Determine element insertion src/dst indices. The src index is from the
// start of the inserted vector, not the start of the concatenated vector.
unsigned V2SrcIndex = 0;
- if (V1DstIndex != -1) {
+ if (V1DstIndex >= 0) {
// If we have a V1 input out of place, we use V1 as the V2 element insertion
// and don't use the original V2 at all.
V2SrcIndex = Mask[V1DstIndex];
@@ -8329,11 +8760,25 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
if (!V1UsedInPlace)
V1 = DAG.getUNDEF(MVT::v4f32);
- unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
+ // Insert the V2 element into the desired position.
+ InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
+ return true;
+}
+
+static SDValue lowerVectorShuffleAsInsertPS(const SDLoc &DL, SDValue V1,
+ SDValue V2, ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+ assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+ SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
+
+ // Attempt to match the insertps pattern.
+ unsigned InsertPSMask;
+ if (!matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
+ return SDValue();
// Insert the V2 element into the desired position.
- SDLoc DL(Op);
return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
}
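The immediate assembled by matchVectorShuffleAsInsertPS follows the INSERTPS encoding, which can be sketched in isolation:

    #include <cstdint>

    // imm8 = SrcLane << 6 | DstLane << 4 | ZMask (lanes 0-3, one ZMask bit per lane).
    static uint8_t insertPSImm(unsigned SrcLane, unsigned DstLane, unsigned ZMask) {
      return uint8_t((SrcLane & 3) << 6 | (DstLane & 3) << 4 | (ZMask & 0xF));
    }
    // e.g. insertPSImm(2, 1, 0x8) == 0x98: take source lane 2, write it to
    // destination lane 1, and zero lane 3.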
@@ -8347,29 +8792,30 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
-static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
+static SDValue lowerVectorShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(!VT.isFloatingPoint() &&
"This routine only supports integer vectors.");
- assert(!isSingleInputShuffleMask(Mask) &&
+ assert(VT.is128BitVector() &&
+ "This routine only works on 128-bit vectors.");
+ assert(!V2.isUndef() &&
"This routine should only be used when blending two inputs.");
assert(Mask.size() >= 2 && "Single element masks are invalid.");
int Size = Mask.size();
- int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
- return M >= 0 && M % Size < Size / 2;
- });
- int NumHiInputs = std::count_if(
- Mask.begin(), Mask.end(), [Size](int M) { return M % Size >= Size / 2; });
+ int NumLoInputs =
+ count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
+ int NumHiInputs =
+ count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
bool UnpackLo = NumLoInputs >= NumHiInputs;
- auto TryUnpack = [&](MVT UnpackVT, int Scale) {
- SmallVector<int, 32> V1Mask(Mask.size(), -1);
- SmallVector<int, 32> V2Mask(Mask.size(), -1);
+ auto TryUnpack = [&](int ScalarSize, int Scale) {
+ SmallVector<int, 16> V1Mask((unsigned)Size, -1);
+ SmallVector<int, 16> V2Mask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
@@ -8401,6 +8847,7 @@ static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
// Cast the inputs to the type we will use to unpack them.
+ MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
V1 = DAG.getBitcast(UnpackVT, V1);
V2 = DAG.getBitcast(UnpackVT, V2);
@@ -8412,15 +8859,10 @@ static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
// We try each unpack from the largest to the smallest to try and find one
// that fits this mask.
- int OrigNumElements = VT.getVectorNumElements();
int OrigScalarSize = VT.getScalarSizeInBits();
- for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
- int Scale = ScalarSize / OrigScalarSize;
- int NumElements = OrigNumElements / Scale;
- MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
- if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
+ for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
+ if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
return Unpack;
- }
// If none of the unpack-rooted lowerings worked (or were profitable) try an
// initial unpack.
@@ -8434,8 +8876,7 @@ static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
// half-crossings are created.
// FIXME: We could consider commuting the unpacks.
- SmallVector<int, 32> PermMask;
- PermMask.assign(Size, -1);
+ SmallVector<int, 32> PermMask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
@@ -8461,28 +8902,25 @@ static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
-static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV2F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
- if (isSingleInputShuffleMask(Mask)) {
- // Use low duplicate instructions for masks that match their pattern.
- if (Subtarget->hasSSE3())
- if (isShuffleEquivalent(V1, V2, Mask, {0, 0}))
- return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
+ if (V2.isUndef()) {
+ // Check for being able to broadcast a single element.
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
+ return Broadcast;
// Straight shuffle of a single input vector. Simulate this by using the
// single input as both of the "inputs" to this instruction.
unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
- if (Subtarget->hasAVX()) {
+ if (Subtarget.hasAVX()) {
// If we have AVX, we can use VPERMILPD, which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
@@ -8521,7 +8959,7 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DL, MVT::v2f64, V2,
DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
Subtarget, DAG))
return Blend;
@@ -8542,21 +8980,18 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
-static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV2I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
- if (isSingleInputShuffleMask(Mask)) {
+ if (V2.isUndef()) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v2i64, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. For everything from SSE2
@@ -8576,28 +9011,29 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(Mask[0] < 2 && "We sort V1 to be the first input.");
assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
- // If we have a blend of two PACKUS operations an the blend aligns with the
- // low and half halves, we can just merge the PACKUS operations. This is
- // particularly important as it lets us merge shuffles that this routine itself
- // creates.
+ // If we have a blend of two same-type PACKUS operations and the blend aligns
+ // with the low and high halves, we can just merge the PACKUS operations.
+ // This is particularly important as it lets us merge shuffles that this
+ // routine itself creates.
auto GetPackNode = [](SDValue V) {
- while (V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
-
+ V = peekThroughBitcasts(V);
return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
};
if (SDValue V1Pack = GetPackNode(V1))
- if (SDValue V2Pack = GetPackNode(V2))
- return DAG.getBitcast(MVT::v2i64,
- DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8,
- Mask[0] == 0 ? V1Pack.getOperand(0)
- : V1Pack.getOperand(1),
- Mask[1] == 2 ? V2Pack.getOperand(0)
- : V2Pack.getOperand(1)));
+ if (SDValue V2Pack = GetPackNode(V2)) {
+ EVT PackVT = V1Pack.getValueType();
+ if (PackVT == V2Pack.getValueType())
+ return DAG.getBitcast(MVT::v2i64,
+ DAG.getNode(X86ISD::PACKUS, DL, PackVT,
+ Mask[0] == 0 ? V1Pack.getOperand(0)
+ : V1Pack.getOperand(1),
+ Mask[1] == 2 ? V2Pack.getOperand(0)
+ : V2Pack.getOperand(1)));
+ }
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// When loading a scalar and then shuffling it into a vector we can often do
@@ -8614,7 +9050,7 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
- bool IsBlendSupported = Subtarget->hasSSE41();
+ bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
Subtarget, DAG))
@@ -8627,7 +9063,7 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// Try to use byte rotation instructions.
// It's more profitable for pre-SSSE3 to use shuffles/unpacks.
- if (Subtarget->hasSSSE3())
+ if (Subtarget.hasSSSE3())
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
return Rotate;
@@ -8655,12 +9091,16 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
// This routine only handles 128-bit shufps.
assert(Mask.size() == 4 && "Unsupported mask size!");
+ assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
+ assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
+ assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
+ assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
// To lower with a single SHUFPS we need to have the low half and high half
// each requiring a single input.
- if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
+ if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
return false;
- if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
+ if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
return false;
return true;
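The constraint checked above can be restated standalone (a sketch): each half of the result must draw from a single input, because SHUFPS takes its low two lanes from one operand and its high two lanes from one operand.

    #include <array>

    static bool isSingleShufpsMaskSketch(const std::array<int, 4> &M) {
      auto sameSource = [](int A, int B) {
        return A < 0 || B < 0 || (A < 4) == (B < 4);
      };
      return sameSource(M[0], M[1]) && sameSource(M[2], M[3]);
    }
    // {0, 2, 4, 6} -> true (low half from V1, high half from V2);
    // {0, 5, 2, 7} -> false (low half mixes inputs).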
@@ -8671,14 +9111,13 @@ static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
-static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
+static SDValue lowerVectorShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
SDValue LowV = V1, HighV = V2;
int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+ int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 1) {
int V2Index =
@@ -8689,7 +9128,7 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
// the low bit.
int V2AdjIndex = V2Index ^ 1;
- if (Mask[V2AdjIndex] == -1) {
+ if (Mask[V2AdjIndex] < 0) {
// Handles all the cases where we have a single V2 element and an undef.
// This will only ever happen in the high lanes because we commute the
// vector otherwise.
@@ -8761,35 +9200,31 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
-static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV4F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+ int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4f32, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v4f32, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Use even/odd duplicate instructions for masks that match their pattern.
- if (Subtarget->hasSSE3()) {
+ if (Subtarget.hasSSE3()) {
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
}
- if (Subtarget->hasAVX()) {
+ if (Subtarget.hasAVX()) {
// If we have AVX, we can use VPERMILPS which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
@@ -8812,13 +9247,13 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
Mask, Subtarget, DAG))
return V;
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
Subtarget, DAG))
return Blend;
// Use INSERTPS if we can complete the shuffle efficiently.
- if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
+ if (SDValue V = lowerVectorShuffleAsInsertPS(DL, V1, V2, Mask, DAG))
return V;
if (!isSingleSHUFPSMask(Mask))
@@ -8827,6 +9262,12 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return BlendPerm;
}
+ // Use low/high mov instructions.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
+ return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
+ return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
+
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
@@ -8840,15 +9281,12 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
-static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV4I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
@@ -8858,13 +9296,12 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
Mask, Subtarget, DAG))
return ZExt;
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+ int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i32, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. For everything from SSE2
@@ -8884,8 +9321,8 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// There are special ways we can lower some single-element blends.
@@ -8896,7 +9333,7 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
- bool IsBlendSupported = Subtarget->hasSSE41();
+ bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
Subtarget, DAG))
@@ -8913,7 +9350,7 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// Try to use byte rotation instructions.
// It's more profitable for pre-SSSE3 to use shuffles/unpacks.
- if (Subtarget->hasSSSE3())
+ if (Subtarget.hasSSSE3())
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
@@ -8957,8 +9394,8 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
/// vector, form the analogous 128-bit 8-element Mask.
static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
- SDLoc DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
@@ -8987,6 +9424,26 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
+ // If we are splatting two values from one half - one to each half, then
+ // we can shuffle that half so each is splatted to a dword, then splat those
+ // to their respective halves.
+ auto SplatHalfs = [&](int LoInput, int HiInput, unsigned ShufWOp,
+ int DOffset) {
+ int PSHUFHalfMask[] = {LoInput % 4, LoInput % 4, HiInput % 4, HiInput % 4};
+ int PSHUFDMask[] = {DOffset + 0, DOffset + 0, DOffset + 1, DOffset + 1};
+ V = DAG.getNode(ShufWOp, DL, VT, V,
+ getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
+ V = DAG.getBitcast(PSHUFDVT, V);
+ V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
+ return DAG.getBitcast(VT, V);
+ };
+
+ if (NumLToL == 1 && NumLToH == 1 && (NumHToL + NumHToH) == 0)
+ return SplatHalfs(LToLInputs[0], LToHInputs[0], X86ISD::PSHUFLW, 0);
+ if (NumHToL == 1 && NumHToH == 1 && (NumLToL + NumLToH) == 0)
+ return SplatHalfs(HToLInputs[0], HToHInputs[0], X86ISD::PSHUFHW, 2);
+
// Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
// such inputs we can swap two of the dwords across the half mark and end up
// with <=2 inputs to each half in each half. Once there, we can fall through
@@ -9096,9 +9553,9 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
for (int &M : Mask)
- if (M != -1 && M == FixIdx)
+ if (M >= 0 && M == FixIdx)
M = FixFreeIdx;
- else if (M != -1 && M == FixFreeIdx)
+ else if (M >= 0 && M == FixFreeIdx)
M = FixIdx;
};
if (NumFlippedBToBInputs != 0) {
@@ -9123,9 +9580,9 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// Adjust the mask to match the new locations of A and B.
for (int &M : Mask)
- if (M != -1 && M/2 == ADWord)
+ if (M >= 0 && M/2 == ADWord)
M = 2 * BDWord + M % 2;
- else if (M != -1 && M/2 == BDWord)
+ else if (M >= 0 && M/2 == BDWord)
M = 2 * ADWord + M % 2;
// Recurse back into this routine to re-compute state now that this isn't
@@ -9194,7 +9651,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
int DestOffset) {
auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
- return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
+ return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
};
auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
int Word) {
@@ -9213,7 +9670,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// If the source half mask maps over the inputs, turn those into
// swaps and use the swapped lane.
if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
- if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
+ if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
Input - SourceOffset;
// We have to swap the uses in our half mask in one sweep.
@@ -9234,7 +9691,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
}
// Map the input's dword into the correct half.
- if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
+ if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
else
assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
@@ -9280,17 +9737,17 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// the inputs, place the other input in it. We use (Index XOR 1) to
// compute an adjacent index.
if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
- SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
+ SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
InputsFixed[1] = InputsFixed[0] ^ 1;
} else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
- SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
+ SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
InputsFixed[0] = InputsFixed[1] ^ 1;
- } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
- SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
+ } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
+ SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
// The two inputs are in the same DWord but it is clobbered and the
// adjacent DWord isn't used at all. Move both inputs to the free
// slot.
@@ -9304,7 +9761,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// free slot adjacent to one of the inputs. In this case, we have to
// swap an input with a non-input.
for (int i = 0; i < 4; ++i)
- assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
+ assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
"We can't handle any clobbers here!");
assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
"Cannot have adjacent inputs here!");
@@ -9338,8 +9795,8 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
}
// Now hoist the DWord down to the right half.
- int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
- assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
+ int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
+ assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
for (int &M : HalfMask)
for (int Input : IncomingInputs)
@@ -9367,11 +9824,9 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// At this point, each half should contain all its inputs, and we can then
// just shuffle them into their final position.
- assert(std::count_if(LoMask.begin(), LoMask.end(),
- [](int M) { return M >= 4; }) == 0 &&
+ assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
"Failed to lift all the high half inputs to the low mask!");
- assert(std::count_if(HiMask.begin(), HiMask.end(),
- [](int M) { return M >= 0 && M < 4; }) == 0 &&
+ assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
"Failed to lift all the low half inputs to the high mask!");
// Do a half shuffle for the low mask.
@@ -9390,11 +9845,11 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
return V;
}
-/// \brief Helper to form a PSHUFB-based shuffle+blend.
-static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2, ArrayRef<int> Mask,
- SelectionDAG &DAG, bool &V1InUse,
- bool &V2InUse) {
+/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
+/// blend if only one input is used.
+static SDValue lowerVectorShuffleAsBlendOfPSHUFBs(
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
SDValue V1Mask[16];
SDValue V2Mask[16];
@@ -9404,7 +9859,7 @@ static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
int Size = Mask.size();
int Scale = 16 / Size;
for (int i = 0; i < 16; ++i) {
- if (Mask[i / Scale] == -1) {
+ if (Mask[i / Scale] < 0) {
V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
} else {
const int ZeroMask = 0x80;
@@ -9425,11 +9880,11 @@ static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
if (V1InUse)
V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
DAG.getBitcast(MVT::v16i8, V1),
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
+ DAG.getBuildVector(MVT::v16i8, DL, V1Mask));
if (V2InUse)
V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
DAG.getBitcast(MVT::v16i8, V2),
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
+ DAG.getBuildVector(MVT::v16i8, DL, V2Mask));
// If we need shuffled inputs from both, blend the two.
SDValue V;
@@ -9454,42 +9909,31 @@ static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
-static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> OrigMask = SVOp->getMask();
- int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
- OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
- MutableArrayRef<int> Mask(MaskStorage);
-
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
- DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
+ DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return ZExt;
- auto isV1 = [](int M) { return M >= 0 && M < 8; };
- (void)isV1;
- auto isV2 = [](int M) { return M >= 8; };
-
- int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
+ int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
if (NumV2Inputs == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i16, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
+ Subtarget, DAG))
return Shift;
// Use dedicated unpack instructions for masks that match their pattern.
@@ -9502,21 +9946,24 @@ static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
Mask, Subtarget, DAG))
return Rotate;
- return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1, Mask,
- Subtarget, DAG);
+ // Make a copy of the mask so it can be modified.
+ SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
+ return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1,
+ MutableMask, Subtarget,
+ DAG);
}
- assert(std::any_of(Mask.begin(), Mask.end(), isV1) &&
+ assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
"All single-input shuffles should be canonicalized to be V1-input "
"shuffles.");
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// See if we can use SSE4A Extraction / Insertion.
- if (Subtarget->hasSSE4A())
+ if (Subtarget.hasSSE4A())
if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask, DAG))
return V;
@@ -9528,7 +9975,7 @@ static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
- bool IsBlendSupported = Subtarget->hasSSE41();
+ bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
Subtarget, DAG))
@@ -9552,16 +9999,17 @@ static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
return BitBlend;
+ // Try to lower by permuting the inputs into an unpack instruction.
if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1,
V2, Mask, DAG))
return Unpack;
// If we can't directly blend but can use PSHUFB, that will be better as it
// can both shuffle and set up the inefficient blend.
- if (!IsBlendSupported && Subtarget->hasSSSE3()) {
+ if (!IsBlendSupported && Subtarget.hasSSSE3()) {
bool V1InUse, V2InUse;
- return lowerVectorShuffleAsPSHUFB(DL, MVT::v8i16, V1, V2, Mask, DAG,
- V1InUse, V2InUse);
+ return lowerVectorShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask, DAG,
+ V1InUse, V2InUse);
}
// We can always bit-blend if we have to, so the fallback strategy is to
@@ -9591,10 +10039,8 @@ static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// \returns N above, or the number of times even elements must be dropped if
/// there is such a number. Otherwise returns zero.
-static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
- // Figure out whether we're looping over two inputs or just one.
- bool IsSingleInput = isSingleInputShuffleMask(Mask);
-
+static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
+ bool IsSingleInput) {
// The modulus for the shuffle vector entries is based on whether this is
// a single input or not.
int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
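// Editor's sketch (aside, not part of the patch): an independent model of the
// documented contract above, under the stated conventions that -1 marks an
// undef lane and that the modulus doubles for two-input shuffles. The in-tree
// routine tracks all strides in a single pass; this model simply tries each
// power-of-two stride in turn. The helper name and demo mask are fabricated.
#include <cstdio>
#include <vector>
static int modelDroppingEvenElements(const std::vector<int> &Mask,
                                     bool IsSingleInput) {
  int Modulus = (int)Mask.size() * (IsSingleInput ? 1 : 2);
  for (int N = 1; N <= 3; ++N) {            // Try strides 2, 4 and 8.
    bool Viable = true;
    for (int i = 0, e = (int)Mask.size(); i != e; ++i)
      if (Mask[i] >= 0 && Mask[i] % Modulus != (i << N) % Modulus)
        Viable = false;                     // Element i breaks the pattern.
    if (Viable)
      return N;
  }
  return 0;                                 // No power-of-two stride matches.
}
int main() {
  // Keeping every even byte of a single v16i8 input == one PACKUS-style drop.
  std::vector<int> M = {0, 2, 4, 6, 8, 10, 12, 14,
                        -1, -1, -1, -1, -1, -1, -1, -1};
  std::printf("N = %d\n", modelDroppingEvenElements(M, true)); // N = 1
}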
@@ -9611,7 +10057,7 @@ static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
for (int i = 0, e = Mask.size(); i < e; ++i) {
// Ignore undef lanes, we'll optimistically collapse them to the pattern we
// want.
- if (Mask[i] == -1)
+ if (Mask[i] < 0)
continue;
bool IsAnyViable = false;
@@ -9645,20 +10091,17 @@ static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
-static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
@@ -9672,18 +10115,17 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return ZExt;
// See if we can use SSE4A Extraction / Insertion.
- if (Subtarget->hasSSE4A())
+ if (Subtarget.hasSSE4A())
if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask, DAG))
return V;
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
+ int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
// For single-input shuffles, there are some nicer lowering tricks we can use.
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i8, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Check whether we can widen this to an i16 shuffle by duplicating bytes.
@@ -9696,7 +10138,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// i16 shuffle as well.
auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
for (int i = 0; i < 16; i += 2)
- if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
+ if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
return false;
return true;
@@ -9734,7 +10176,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
// If we haven't yet mapped the input, search for a slot into which
// we can map it.
- while (j < je && PreDupI16Shuffle[j] != -1)
+ while (j < je && PreDupI16Shuffle[j] >= 0)
++j;
if (j == je)
@@ -9759,10 +10201,10 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
for (int i = 0; i < 16; ++i)
- if (Mask[i] != -1) {
+ if (Mask[i] >= 0) {
int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
- if (PostDupI16Shuffle[i / 2] == -1)
+ if (PostDupI16Shuffle[i / 2] < 0)
PostDupI16Shuffle[i / 2] = MappedMask;
else
assert(PostDupI16Shuffle[i / 2] == MappedMask &&
@@ -9799,18 +10241,18 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// FIXME: The only exceptions to the above are blends which are exact
// interleavings with direct instructions supporting them. We currently don't
// handle those well here.
- if (Subtarget->hasSSSE3()) {
+ if (Subtarget.hasSSSE3()) {
bool V1InUse = false;
bool V2InUse = false;
- SDValue PSHUFB = lowerVectorShuffleAsPSHUFB(DL, MVT::v16i8, V1, V2, Mask,
- DAG, V1InUse, V2InUse);
+ SDValue PSHUFB = lowerVectorShuffleAsBlendOfPSHUFBs(
+ DL, MVT::v16i8, V1, V2, Mask, DAG, V1InUse, V2InUse);
// If both V1 and V2 are in use and we can use a direct blend or an unpack,
// do so. This avoids using them to handle blends-with-zero which is
// important as a single pshufb is significantly faster for that.
if (V1InUse && V2InUse) {
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2,
Mask, Subtarget, DAG))
return Blend;
@@ -9848,11 +10290,11 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// We special case these as they can be particularly efficiently handled with
// the PACKUSWB instruction on x86 and they show up in common patterns of
// rearranging bytes to truncate wide elements.
- if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
+ bool IsSingleInput = V2.isUndef();
+ if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
// NumEvenDrops is the power of two stride of the elements. Another way of
// thinking about it is that we need to drop the even elements this many
// times to get the original input.
- bool IsSingleInput = isSingleInputShuffleMask(Mask);
// First we need to zero all the dropped bytes.
assert(NumEvenDrops <= 3 &&
@@ -9907,7 +10349,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// Use a mask to drop the high bytes.
VLoHalf = DAG.getBitcast(MVT::v8i16, V);
VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
- DAG.getConstant(0x00FF, DL, MVT::v8i16));
+ DAG.getConstant(0x00FF, DL, MVT::v8i16));
// This will be a single vector shuffle instead of a blend so nuke VHiHalf.
VHiHalf = DAG.getUNDEF(MVT::v8i16);
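// Editor's sketch (aside, not part of the patch): a scalar model of the
// AND + PACKUSWB step above. Zeroing the high byte of each i16 lane first
// means the unsigned saturation in PACKUSWB can never trigger, so the pack
// keeps exactly the surviving low bytes. Input values are fabricated.
#include <cstdint>
#include <cstdio>
int main() {
  uint16_t Lo[8], Hi[8];
  for (int i = 0; i < 8; ++i) {
    Lo[i] = (uint16_t)(0xAB00 | (2 * i));        // Garbage in the high byte.
    Hi[i] = (uint16_t)(0xCD00 | (2 * i + 16));
  }
  uint8_t Packed[16];
  for (int i = 0; i < 8; ++i) {
    Packed[i] = (uint8_t)(Lo[i] & 0x00FF);       // AND drops the high byte,
    Packed[i + 8] = (uint8_t)(Hi[i] & 0x00FF);   // so the pack is lossless.
  }
  for (int i = 0; i < 16; ++i)
    std::printf("%u ", Packed[i]);               // 0 2 4 ... 14 16 18 ... 30
  std::printf("\n");
}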
@@ -9938,22 +10380,23 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
-static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- MVT VT, const X86Subtarget *Subtarget,
+static SDValue lower128BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ MVT VT, SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
switch (VT.SimpleTy) {
case MVT::v2i64:
- return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV2I64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v2f64:
- return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV2F64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v4i32:
- return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV4I32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v4f32:
- return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV4F32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v8i16:
- return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8I16VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v16i8:
- return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV16I8VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Unimplemented!");
@@ -9971,21 +10414,22 @@ static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// a zero-ed lane of a vector.
static bool canWidenShuffleElements(ArrayRef<int> Mask,
SmallVectorImpl<int> &WidenedMask) {
+ WidenedMask.assign(Mask.size() / 2, 0);
for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
// If both elements are undef, it's trivial.
if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
- WidenedMask.push_back(SM_SentinelUndef);
+ WidenedMask[i/2] = SM_SentinelUndef;
continue;
}
// Check for an undef mask and a mask value properly aligned to fit with
// a pair of values. If we find such a case, use the non-undef mask's value.
if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
- WidenedMask.push_back(Mask[i + 1] / 2);
+ WidenedMask[i/2] = Mask[i + 1] / 2;
continue;
}
if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
- WidenedMask.push_back(Mask[i] / 2);
+ WidenedMask[i/2] = Mask[i] / 2;
continue;
}
@@ -9993,7 +10437,7 @@ static bool canWidenShuffleElements(ArrayRef<int> Mask,
if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
(Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
- WidenedMask.push_back(SM_SentinelZero);
+ WidenedMask[i/2] = SM_SentinelZero;
continue;
}
return false;
@@ -10002,7 +10446,7 @@ static bool canWidenShuffleElements(ArrayRef<int> Mask,
// Finally check if the two mask values are adjacent and aligned with
// a pair.
if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
- WidenedMask.push_back(Mask[i] / 2);
+ WidenedMask[i/2] = Mask[i] / 2;
continue;
}
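// Editor's sketch (aside, not part of the patch): the aligned-pair rule above
// on a tiny example, deliberately omitting the zero and half-undef cases the
// real routine also accepts. A v4 mask collapses to a v2 mask when each pair
// is adjacent and even-aligned.
#include <cstdio>
#include <vector>
static bool widenPairs(const std::vector<int> &Mask, std::vector<int> &Wide) {
  Wide.assign(Mask.size() / 2, -1);
  for (size_t i = 0; i < Mask.size(); i += 2) {
    int A = Mask[i], B = Mask[i + 1];
    if (A >= 0 && A % 2 == 0 && A + 1 == B)
      Wide[i / 2] = A / 2;                  // Adjacent, aligned pair.
    else if (A != -1 || B != -1)
      return false;                         // Misaligned pair; cannot widen.
  }
  return true;
}
int main() {
  std::vector<int> Wide;
  std::vector<int> M = {0, 1, 6, 7};        // v4i32 mask -> v2i64 mask {0,3}.
  if (widenPairs(M, Wide))
    std::printf("{%d, %d}\n", Wide[0], Wide[1]);
}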
@@ -10020,7 +10464,7 @@ static bool canWidenShuffleElements(ArrayRef<int> Mask,
/// This routine just extracts two subvectors, shuffles them independently, and
/// then concatenates them back together. This should work effectively with all
/// AVX vector shuffle types.
-static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
+static SDValue splitAndLowerVectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(VT.getSizeInBits() >= 256 &&
@@ -10039,8 +10483,7 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
// Rather than splitting build-vectors, just build two narrower build
// vectors. This helps shuffling with splats and zeros.
auto SplitVector = [&](SDValue V) {
- while (V.getOpcode() == ISD::BITCAST)
- V = V->getOperand(0);
+ V = peekThroughBitcasts(V);
MVT OrigVT = V.getSimpleValueType();
int OrigNumElements = OrigVT.getVectorNumElements();
@@ -10063,8 +10506,8 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
LoOps.push_back(BV->getOperand(i));
HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
}
- LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
- HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
+ LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
+ HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
}
return std::make_pair(DAG.getBitcast(SplitVT, LoV),
DAG.getBitcast(SplitVT, HiV));
@@ -10077,7 +10520,9 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
// Now create two 4-way blends of these half-width vectors.
auto HalfBlend = [&](ArrayRef<int> HalfMask) {
bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
- SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
+ SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
+ SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
+ SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
for (int i = 0; i < SplitNumElements; ++i) {
int M = HalfMask[i];
if (M >= NumElements) {
@@ -10085,21 +10530,15 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
UseHiV2 = true;
else
UseLoV2 = true;
- V2BlendMask.push_back(M - NumElements);
- V1BlendMask.push_back(-1);
- BlendMask.push_back(SplitNumElements + i);
+ V2BlendMask[i] = M - NumElements;
+ BlendMask[i] = SplitNumElements + i;
} else if (M >= 0) {
if (M >= SplitNumElements)
UseHiV1 = true;
else
UseLoV1 = true;
- V2BlendMask.push_back(-1);
- V1BlendMask.push_back(M);
- BlendMask.push_back(i);
- } else {
- V2BlendMask.push_back(-1);
- V1BlendMask.push_back(-1);
- BlendMask.push_back(-1);
+ V1BlendMask[i] = M;
+ BlendMask[i] = i;
}
}
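// Editor's sketch (aside, not part of the patch): the three masks built by
// the loop above, computed for one fabricated half-mask with
// SplitNumElements = 4 and NumElements = 8. V1 and V2 each get an in-half
// shuffle; BlendMask then selects per element between the two results.
#include <cstdio>
int main() {
  const int SplitNumElements = 4, NumElements = 8;
  int HalfMask[4] = {9, 1, -1, 10};         // Mixed V1/V2/undef references.
  int V1M[4], V2M[4], BlendM[4];
  for (int i = 0; i < SplitNumElements; ++i) {
    V1M[i] = V2M[i] = BlendM[i] = -1;
    int M = HalfMask[i];
    if (M >= NumElements) {                 // Element comes from V2.
      V2M[i] = M - NumElements;
      BlendM[i] = SplitNumElements + i;
    } else if (M >= 0) {                    // Element comes from V1.
      V1M[i] = M;
      BlendM[i] = i;
    }
  }
  for (int i = 0; i < SplitNumElements; ++i)
    std::printf("%d/%d/%d ", V1M[i], V2M[i], BlendM[i]);
  std::printf("\n");                        // -1/1/4 1/-1/1 -1/-1/-1 -1/2/7
}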
@@ -10151,12 +10590,12 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
-static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2, ArrayRef<int> Mask,
+static SDValue lowerVectorShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
+ ArrayRef<int> Mask,
SelectionDAG &DAG) {
- assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
- "lower single-input shuffles as it "
- "could then recurse on itself.");
+ assert(!V2.isUndef() && "This routine must not be used to lower single-input "
+ "shuffles as it could then recurse on itself.");
int Size = Mask.size();
// If this can be modeled as a broadcast of two elements followed by a blend,
@@ -10166,12 +10605,12 @@ static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
for (int M : Mask)
if (M >= Size) {
- if (V2BroadcastIdx == -1)
+ if (V2BroadcastIdx < 0)
V2BroadcastIdx = M - Size;
else if (M - Size != V2BroadcastIdx)
return false;
} else if (M >= 0) {
- if (V1BroadcastIdx == -1)
+ if (V1BroadcastIdx < 0)
V1BroadcastIdx = M;
else if (M != V1BroadcastIdx)
return false;
@@ -10210,54 +10649,51 @@ static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
/// is lower than any other fully general cross-lane shuffle strategy I'm aware
/// of. Special cases for each particular shuffle pattern should be handled
/// prior to trying this lowering.
-static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
+static SDValue lowerVectorShuffleAsLanePermuteAndBlend(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
// FIXME: This should probably be generalized for 512-bit vectors as well.
assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
- int LaneSize = Mask.size() / 2;
+ int Size = Mask.size();
+ int LaneSize = Size / 2;
// If there are only inputs from one 128-bit lane, splitting will in fact be
// less expensive. The flags track whether the given lane contains an element
// that crosses to another lane.
bool LaneCrossing[2] = {false, false};
- for (int i = 0, Size = Mask.size(); i < Size; ++i)
+ for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
if (!LaneCrossing[0] || !LaneCrossing[1])
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
- if (isSingleInputShuffleMask(Mask)) {
- SmallVector<int, 32> FlippedBlendMask;
- for (int i = 0, Size = Mask.size(); i < Size; ++i)
- FlippedBlendMask.push_back(
- Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
- ? Mask[i]
- : Mask[i] % LaneSize +
- (i / LaneSize) * LaneSize + Size));
-
- // Flip the vector, and blend the results which should now be in-lane. The
- // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
- // 5 for the high source. The value 3 selects the high half of source 2 and
- // the value 2 selects the low half of source 2. We only use source 2 to
- // allow folding it into a memory operand.
- unsigned PERMMask = 3 | 2 << 4;
- SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
- V1, DAG.getConstant(PERMMask, DL, MVT::i8));
- return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
- }
-
- // This now reduces to two single-input shuffles of V1 and V2 which at worst
- // will be handled by the above logic and a blend of the results, much like
- // other patterns in AVX.
- return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
+ assert(V2.isUndef() &&
+ "This last part of this routine only works on single input shuffles");
+
+ SmallVector<int, 32> FlippedBlendMask(Size);
+ for (int i = 0; i < Size; ++i)
+ FlippedBlendMask[i] =
+ Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
+ ? Mask[i]
+ : Mask[i] % LaneSize +
+ (i / LaneSize) * LaneSize + Size);
+
+ // Flip the vector, and blend the results which should now be in-lane. The
+ // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
+ // 5 for the high source. The value 3 selects the high half of source 2 and
+ // the value 2 selects the low half of source 2. We only use source 2 to
+ // allow folding it into a memory operand.
+ unsigned PERMMask = 3 | 2 << 4;
+ SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
+ V1, DAG.getConstant(PERMMask, DL, MVT::i8));
+ return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
}
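// Editor's sketch (aside, not part of the patch): decoding the VPERM2X128
// immediate used above, ignoring the zeroing bits. Bits 1:0 select the
// 128-bit half placed in the low result lane and bits 5:4 the half placed in
// the high lane; values 0/1 address source 1 and 2/3 address source 2, so
// PERMMask = 3 | (2 << 4) yields the halves of source 2, swapped.
#include <cstdio>
int main() {
  static const char *Names[4] = {"src1.lo", "src1.hi", "src2.lo", "src2.hi"};
  unsigned PERMMask = 3 | 2 << 4;           // Same immediate as in the patch.
  std::printf("result.lo = %s, result.hi = %s\n", Names[PERMMask & 3],
              Names[(PERMMask >> 4) & 3]);  // src2.hi, src2.lo
}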
/// \brief Handle lowering 2-lane 128-bit shuffles.
-static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// TODO: If minimizing size and one of the inputs is a zero vector and the
// zero vector has only one use, we could use a VPERM2X128 to save the
@@ -10278,6 +10714,10 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
// subvector.
bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
+ // With AVX2 we should use VPERMQ/VPERMPD to allow memory folding.
+ if (Subtarget.hasAVX2() && V2.isUndef())
+ return SDValue();
+
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements() / 2);
SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
@@ -10349,10 +10789,9 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
/// in x86 only floating point has interesting non-repeating shuffles, and even
/// those are still *marginally* more expensive.
static SDValue lowerVectorShuffleByMerging128BitLanes(
- SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
- assert(!isSingleInputShuffleMask(Mask) &&
- "This is only useful with multiple inputs.");
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
+ assert(!V2.isUndef() && "This is only useful with multiple inputs.");
int Size = Mask.size();
int LaneSize = 128 / VT.getScalarSizeInBits();
@@ -10361,10 +10800,8 @@ static SDValue lowerVectorShuffleByMerging128BitLanes(
// See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
// check whether the in-128-bit lane shuffles share a repeating pattern.
- SmallVector<int, 4> Lanes;
- Lanes.resize(NumLanes, -1);
- SmallVector<int, 4> InLaneMask;
- InLaneMask.resize(LaneSize, -1);
+ SmallVector<int, 4> Lanes((unsigned)NumLanes, -1);
+ SmallVector<int, 4> InLaneMask((unsigned)LaneSize, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
@@ -10392,8 +10829,7 @@ static SDValue lowerVectorShuffleByMerging128BitLanes(
// First shuffle the lanes into place.
MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
VT.getSizeInBits() / 64);
- SmallVector<int, 8> LaneMask;
- LaneMask.resize(NumLanes * 2, -1);
+ SmallVector<int, 8> LaneMask((unsigned)NumLanes * 2, -1);
for (int i = 0; i < NumLanes; ++i)
if (Lanes[i] >= 0) {
LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
@@ -10408,8 +10844,7 @@ static SDValue lowerVectorShuffleByMerging128BitLanes(
LaneShuffle = DAG.getBitcast(VT, LaneShuffle);
// Now do a simple shuffle that isn't lane crossing.
- SmallVector<int, 8> NewMask;
- NewMask.resize(Size, -1);
+ SmallVector<int, 8> NewMask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
@@ -10422,11 +10857,12 @@ static SDValue lowerVectorShuffleByMerging128BitLanes(
/// Lower shuffles where an entire half of a 256-bit vector is UNDEF.
/// This allows for fast cases such as subvector extraction/insertion
/// or shuffling smaller vector types which can lower more efficiently.
-static SDValue lowerVectorShuffleWithUndefHalf(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget,
+static SDValue lowerVectorShuffleWithUndefHalf(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
+ ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(VT.getSizeInBits() == 256 && "Expected 256-bit vector");
+ assert(VT.is256BitVector() && "Expected 256-bit vector");
unsigned NumElts = VT.getVectorNumElements();
unsigned HalfNumElts = NumElts / 2;
@@ -10457,21 +10893,16 @@ static SDValue lowerVectorShuffleWithUndefHalf(SDLoc DL, MVT VT, SDValue V1,
DAG.getIntPtrConstant(HalfNumElts, DL));
}
- // AVX2 supports efficient immediate 64-bit element cross-lane shuffles.
- if (UndefLower && Subtarget->hasAVX2() &&
- (VT == MVT::v4f64 || VT == MVT::v4i64))
- return SDValue();
-
- // If the shuffle only uses the lower halves of the input operands,
+ // If the shuffle only uses two of the four halves of the input operands,
// then extract them and perform the 'half' shuffle at half width.
// e.g. vector_shuffle <X, X, X, X, u, u, u, u> or <X, X, u, u>
int HalfIdx1 = -1, HalfIdx2 = -1;
- SmallVector<int, 8> HalfMask;
+ SmallVector<int, 8> HalfMask(HalfNumElts);
unsigned Offset = UndefLower ? HalfNumElts : 0;
for (unsigned i = 0; i != HalfNumElts; ++i) {
int M = Mask[i + Offset];
if (M < 0) {
- HalfMask.push_back(M);
+ HalfMask[i] = M;
continue;
}
@@ -10479,23 +10910,18 @@ static SDValue lowerVectorShuffleWithUndefHalf(SDLoc DL, MVT VT, SDValue V1,
// i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
int HalfIdx = M / HalfNumElts;
- // Only shuffle using the lower halves of the inputs.
- // TODO: Investigate usefulness of shuffling with upper halves.
- if (HalfIdx != 0 && HalfIdx != 2)
- return SDValue();
-
// Determine the element index into its half vector source.
int HalfElt = M % HalfNumElts;
// We can shuffle with up to 2 half vectors, set the new 'half'
// shuffle mask accordingly.
- if (-1 == HalfIdx1 || HalfIdx1 == HalfIdx) {
- HalfMask.push_back(HalfElt);
+ if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
+ HalfMask[i] = HalfElt;
HalfIdx1 = HalfIdx;
continue;
}
- if (-1 == HalfIdx2 || HalfIdx2 == HalfIdx) {
- HalfMask.push_back(HalfElt + HalfNumElts);
+ if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
+ HalfMask[i] = HalfElt + HalfNumElts;
HalfIdx2 = HalfIdx;
continue;
}
@@ -10505,6 +10931,33 @@ static SDValue lowerVectorShuffleWithUndefHalf(SDLoc DL, MVT VT, SDValue V1,
}
assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
+ // Only shuffle the halves of the inputs when useful.
+ int NumLowerHalves =
+ (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
+ int NumUpperHalves =
+ (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
+
+ // uuuuXXXX - don't extract uppers just to insert again.
+ if (UndefLower && NumUpperHalves != 0)
+ return SDValue();
+
+ // XXXXuuuu - don't extract both uppers, instead shuffle and then extract.
+ if (UndefUpper && NumUpperHalves == 2)
+ return SDValue();
+
+ // AVX2 - XXXXuuuu - always extract lowers.
+ if (Subtarget.hasAVX2() && !(UndefUpper && NumUpperHalves == 0)) {
+ // AVX2 supports efficient immediate 64-bit element cross-lane shuffles.
+ if (VT == MVT::v4f64 || VT == MVT::v4i64)
+ return SDValue();
+ // AVX2 supports variable 32-bit element cross-lane shuffles.
+ if (VT == MVT::v8f32 || VT == MVT::v8i32) {
+ // XXXXuuuu - don't extract lowers and uppers.
+ if (UndefUpper && NumLowerHalves != 0 && NumUpperHalves != 0)
+ return SDValue();
+ }
+ }
+
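// Editor's sketch (aside, not part of the patch): the half classification
// feeding the counters above, run on a fabricated v8 mask whose upper half is
// undef. Halves are numbered as in the comment earlier in this function:
// 0 = lower V1, 1 = upper V1, 2 = lower V2, 3 = upper V2.
#include <cstdio>
int main() {
  const int HalfNumElts = 4;
  int Mask[4] = {0, 9, 2, 11};              // The defined (lower) half only.
  int HalfIdx1 = -1, HalfIdx2 = -1, HalfMask[4];
  for (int i = 0; i < HalfNumElts; ++i) {
    int M = Mask[i];
    if (M < 0) { HalfMask[i] = M; continue; }
    int HalfIdx = M / HalfNumElts;          // Which of the four halves.
    int HalfElt = M % HalfNumElts;          // Index within that half.
    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
      HalfMask[i] = HalfElt;                // First half vector operand.
      HalfIdx1 = HalfIdx;
    } else if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
      HalfMask[i] = HalfElt + HalfNumElts;  // Second half vector operand.
      HalfIdx2 = HalfIdx;
    }
  }
  std::printf("halves %d,%d mask %d %d %d %d\n", HalfIdx1, HalfIdx2,
              HalfMask[0], HalfMask[1], HalfMask[2], HalfMask[3]);
  // halves 0,2 mask 0 5 2 7 -- a v4 shuffle of the two lower halves.
}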
auto GetHalfVector = [&](int HalfIdx) {
if (HalfIdx < 0)
return DAG.getUNDEF(HalfVT);
@@ -10536,7 +10989,177 @@ static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
return true;
}
-static SDValue lowerVectorShuffleWithSHUFPD(SDLoc DL, MVT VT,
+/// Handle case where shuffle sources are coming from the same 128-bit lane and
+/// every lane can be represented as the same repeating mask - allowing us to
+/// shuffle the sources with the repeating shuffle and then permute the result
+/// to the destination lanes.
+static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
+ int NumElts = VT.getVectorNumElements();
+ int NumLanes = VT.getSizeInBits() / 128;
+ int NumLaneElts = NumElts / NumLanes;
+
+ // On AVX2 we may be able to just shuffle the lowest elements and then
+ // broadcast the result.
+ if (Subtarget.hasAVX2()) {
+ for (unsigned BroadcastSize : {16, 32, 64}) {
+ if (BroadcastSize <= VT.getScalarSizeInBits())
+ continue;
+ int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
+
+ // Attempt to match a repeating pattern every NumBroadcastElts,
+      // accounting for UNDEFs but only referencing the lowest 128-bit
+ // lane of the inputs.
+ auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
+ for (int i = 0; i != NumElts; i += NumBroadcastElts)
+ for (int j = 0; j != NumBroadcastElts; ++j) {
+ int M = Mask[i + j];
+ if (M < 0)
+ continue;
+ int &R = RepeatMask[j];
+ if (0 != ((M % NumElts) / NumLaneElts))
+ return false;
+ if (0 <= R && R != M)
+ return false;
+ R = M;
+ }
+ return true;
+ };
+
+ SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
+ if (!FindRepeatingBroadcastMask(RepeatMask))
+ continue;
+
+ // Shuffle the (lowest) repeated elements in place for broadcast.
+ SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
+
+ // Shuffle the actual broadcast.
+ SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
+ for (int i = 0; i != NumElts; i += NumBroadcastElts)
+ for (int j = 0; j != NumBroadcastElts; ++j)
+ BroadcastMask[i + j] = j;
+ return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
+ BroadcastMask);
+ }
+ }
+
+ // Bail if the shuffle mask doesn't cross 128-bit lanes.
+ if (!is128BitLaneCrossingShuffleMask(VT, Mask))
+ return SDValue();
+
+ // Bail if we already have a repeated lane shuffle mask.
+ SmallVector<int, 8> RepeatedShuffleMask;
+ if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
+ return SDValue();
+
+ // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
+ // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
+ int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
+ int NumSubLanes = NumLanes * SubLaneScale;
+ int NumSubLaneElts = NumLaneElts / SubLaneScale;
+
+ // Check that all the sources are coming from the same lane and see if we can
+ // form a repeating shuffle mask (local to each sub-lane). At the same time,
+ // determine the source sub-lane for each destination sub-lane.
+ int TopSrcSubLane = -1;
+ SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
+ SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
+ SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
+ SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
+
+ for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
+ // Extract the sub-lane mask, check that it all comes from the same lane
+ // and normalize the mask entries to come from the first lane.
+ int SrcLane = -1;
+ SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
+ for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
+ int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
+ if (M < 0)
+ continue;
+ int Lane = (M % NumElts) / NumLaneElts;
+ if ((0 <= SrcLane) && (SrcLane != Lane))
+ return SDValue();
+ SrcLane = Lane;
+ int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
+ SubLaneMask[Elt] = LocalM;
+ }
+
+ // Whole sub-lane is UNDEF.
+ if (SrcLane < 0)
+ continue;
+
+ // Attempt to match against the candidate repeated sub-lane masks.
+ for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
+ auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
+ for (int i = 0; i != NumSubLaneElts; ++i) {
+ if (M1[i] < 0 || M2[i] < 0)
+ continue;
+ if (M1[i] != M2[i])
+ return false;
+ }
+ return true;
+ };
+
+ auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
+ if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
+ continue;
+
+ // Merge the sub-lane mask into the matching repeated sub-lane mask.
+ for (int i = 0; i != NumSubLaneElts; ++i) {
+ int M = SubLaneMask[i];
+ if (M < 0)
+ continue;
+ assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
+ "Unexpected mask element");
+ RepeatedSubLaneMask[i] = M;
+ }
+
+      // Track the topmost source sub-lane - by setting the remaining to UNDEF
+ // we can greatly simplify shuffle matching.
+ int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
+ TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
+ Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
+ break;
+ }
+
+ // Bail if we failed to find a matching repeated sub-lane mask.
+ if (Dst2SrcSubLanes[DstSubLane] < 0)
+ return SDValue();
+ }
+ assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
+ "Unexpected source lane");
+
+ // Create a repeating shuffle mask for the entire vector.
+ SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
+ for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
+ int Lane = SubLane / SubLaneScale;
+ auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
+ for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
+ int M = RepeatedSubLaneMask[Elt];
+ if (M < 0)
+ continue;
+ int Idx = (SubLane * NumSubLaneElts) + Elt;
+ RepeatedMask[Idx] = M + (Lane * NumLaneElts);
+ }
+ }
+ SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
+
+ // Shuffle each source sub-lane to its destination.
+ SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
+ for (int i = 0; i != NumElts; i += NumSubLaneElts) {
+ int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
+ if (SrcSubLane < 0)
+ continue;
+ for (int j = 0; j != NumSubLaneElts; ++j)
+ SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
+ }
+
+ return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
+ SubLaneMask);
+}
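// Editor's sketch (aside, not part of the patch): the decomposition this
// routine performs, checked on a fabricated single-input v8i32 mask with lane
// size 4 and no AVX2 sub-lane splitting. Undef handling and two-input masks
// are omitted for brevity. Mask {4,5,6,7,0,1,2,3} has the identity as its
// repeated in-lane mask and simply swaps the two 128-bit lanes.
#include <cstdio>
int main() {
  const int NumElts = 8, NumLaneElts = 4;
  int Mask[8] = {4, 5, 6, 7, 0, 1, 2, 3};
  int Repeated[4] = {-1, -1, -1, -1};       // In-lane pattern per element.
  int Dst2Src[2] = {-1, -1};                // Source lane per dest lane.
  for (int i = 0; i < NumElts; ++i) {
    int Lane = Mask[i] / NumLaneElts, Local = Mask[i] % NumLaneElts;
    int DstLane = i / NumLaneElts, Elt = i % NumLaneElts;
    if (Repeated[Elt] >= 0 && Repeated[Elt] != Local)
      return 1;                             // Lanes disagree; bail out.
    Repeated[Elt] = Local;
    if (Dst2Src[DstLane] >= 0 && Dst2Src[DstLane] != Lane)
      return 1;                             // Mixed source lanes; bail out.
    Dst2Src[DstLane] = Lane;
  }
  std::printf("repeat {%d,%d,%d,%d} lanes {%d,%d}\n", Repeated[0], Repeated[1],
              Repeated[2], Repeated[3], Dst2Src[0], Dst2Src[1]);
  // repeat {0,1,2,3} lanes {1,0}: shuffle in-lane, then permute whole lanes.
}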
+
+static SDValue lowerVectorShuffleWithSHUFPD(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
@@ -10571,25 +11194,24 @@ static SDValue lowerVectorShuffleWithSHUFPD(SDLoc DL, MVT VT,
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
/// isn't available.
-static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV4F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
SmallVector<int, 4> WidenedMask;
if (canWidenShuffleElements(Mask, WidenedMask))
- return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
- DAG);
+ if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask,
+ Subtarget, DAG))
+ return V;
- if (isSingleInputShuffleMask(Mask)) {
+ if (V2.isUndef()) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4f64, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Use low duplicate instructions for masks that match their pattern.
@@ -10597,7 +11219,7 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
- // Non-half-crossing single input shuffles can be lowerid with an
+ // Non-half-crossing single input shuffles can be lowered with an
// interleaved permutation.
unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
@@ -10606,10 +11228,16 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
// With AVX2 we have direct support for this permutation.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
+  // Try to create an in-lane repeating shuffle mask and then shuffle the
+  // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
DAG);
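// Editor's sketch (aside, not part of the patch): the VPERMILPD immediate
// built above. For v4f64 every element stays inside its own 128-bit lane, so
// bit i of the immediate just means "take the high element of the lane" for
// result position i. The in-lane mask {1,0,3,3} is a fabricated example.
#include <cstdio>
int main() {
  int Mask[4] = {1, 0, 3, 3};               // In-lane v4f64 shuffle mask.
  unsigned Imm = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
  std::printf("imm = 0x%X\n", Imm);         // imm = 0xD (0b1101).
}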
@@ -10629,19 +11257,25 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
lowerVectorShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG))
return Op;
+  // Try to create an in-lane repeating shuffle mask and then shuffle the
+  // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle. However, if we have AVX2 and either input is already in place,
// we will be able to shuffle the other input across lanes in a single
// instruction, so skip this pattern.
- if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
- isShuffleMaskInputInPlace(1, Mask))))
+ if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
+ isShuffleMaskInputInPlace(1, Mask))))
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return Result;
// If we have AVX2 then we always want to lower with a blend because at v4 we
// can fully permute the elements.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
Mask, DAG);
@@ -10653,59 +11287,53 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
-static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV4I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
- assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
+ assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
SmallVector<int, 4> WidenedMask;
if (canWidenShuffleElements(Mask, WidenedMask))
- return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
- DAG);
+ if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask,
+ Subtarget, DAG))
+ return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
- // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
- // use lower latency instructions that will operate on both 128-bit lanes.
- SmallVector<int, 2> RepeatedMask;
- if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
- if (isSingleInputShuffleMask(Mask)) {
- int PSHUFDMask[] = {-1, -1, -1, -1};
- for (int i = 0; i < 2; ++i)
- if (RepeatedMask[i] >= 0) {
- PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
- PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
- }
+ if (V2.isUndef()) {
+ // When the shuffle is mirrored between the 128-bit lanes of the unit, we
+ // can use lower latency instructions that will operate on both lanes.
+ SmallVector<int, 2> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
+ SmallVector<int, 4> PSHUFDMask;
+ scaleShuffleMask(2, RepeatedMask, PSHUFDMask);
return DAG.getBitcast(
MVT::v4i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
DAG.getBitcast(MVT::v8i32, V1),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
- }
- // AVX2 provides a direct instruction for permuting a single input across
- // lanes.
- if (isSingleInputShuffleMask(Mask))
+ // AVX2 provides a direct instruction for permuting a single input across
+ // lanes.
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
+ }
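// Editor's sketch (aside, not part of the patch): the mask scaling used
// above, assuming scaleShuffleMask(Scale, M, Out) expands each element M[i]
// into Scale consecutive entries Scale*M[i] .. Scale*M[i]+Scale-1 with undef
// preserved. A repeated v2i64 mask {1,0} then becomes the v4i32 PSHUFD mask
// {2,3,0,1}; the helper below is a local model, not the in-tree function.
#include <cstdio>
#include <vector>
static void scaleMaskModel(int Scale, const std::vector<int> &M,
                           std::vector<int> &Out) {
  Out.assign(M.size() * Scale, -1);
  for (size_t i = 0; i < M.size(); ++i)
    if (M[i] >= 0)
      for (int j = 0; j < Scale; ++j)
        Out[i * Scale + j] = M[i] * Scale + j;
}
int main() {
  std::vector<int> Repeated = {1, 0}, PSHUFD;
  scaleMaskModel(2, Repeated, PSHUFD);
  std::printf("{%d,%d,%d,%d}\n", PSHUFD[0], PSHUFD[1], PSHUFD[2], PSHUFD[3]);
}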
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// Use dedicated unpack instructions for masks that match their pattern.
@@ -10717,7 +11345,7 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// shuffle. However, if we have AVX2 and either input is already in place,
// we will be able to shuffle the other input across lanes in a single
// instruction, so skip this pattern.
- if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
+ if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
isShuffleMaskInputInPlace(1, Mask))))
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
@@ -10732,14 +11360,12 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
/// isn't available.
-static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
@@ -10747,7 +11373,7 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return Blend;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
@@ -10759,12 +11385,12 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
"Repeated masks must be half the mask width!");
// Use even/odd duplicate instructions for masks that match their pattern.
- if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
+ if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
- if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3, 5, 5, 7, 7}))
+ if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
- if (isSingleInputShuffleMask(Mask))
+ if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
@@ -10774,30 +11400,30 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return V;
// Otherwise, fall back to a SHUFPS sequence. Here it is important that we
- // have already handled any direct blends. We also need to squash the
- // repeated mask into a simulated v4f32 mask.
- for (int i = 0; i < 4; ++i)
- if (RepeatedMask[i] >= 8)
- RepeatedMask[i] -= 4;
+ // have already handled any direct blends.
return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
}
+  // Try to create an in-lane repeating shuffle mask and then shuffle the
+  // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
// If we have a single input shuffle with different shuffle patterns in the
// two 128-bit lanes, use the variable mask to VPERMILPS.
- if (isSingleInputShuffleMask(Mask)) {
+ if (V2.isUndef()) {
SDValue VPermMask[8];
for (int i = 0; i < 8; ++i)
VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
: DAG.getConstant(Mask[i], DL, MVT::i32);
if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
- return DAG.getNode(
- X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
+ return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
+ DAG.getBuildVector(MVT::v8i32, DL, VPermMask));
- if (Subtarget->hasAVX2())
- return DAG.getNode(
- X86ISD::VPERMV, DL, MVT::v8f32,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
+ if (Subtarget.hasAVX2())
+ return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
+ DAG.getBuildVector(MVT::v8i32, DL, VPermMask), V1);
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
@@ -10812,7 +11438,7 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// If we have AVX2 then we always want to lower with a blend because at v8 we
// can fully permute the elements.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
Mask, DAG);
@@ -10824,16 +11450,14 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
-static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
- assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
+ assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
@@ -10847,7 +11471,7 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return Blend;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
@@ -10857,7 +11481,7 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
- if (isSingleInputShuffleMask(Mask))
+ if (V2.isUndef())
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
@@ -10868,24 +11492,30 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
+ // Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
+  // Try to create an in-lane repeating shuffle mask and then shuffle the
+  // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
// If the shuffle patterns aren't repeated but it is a single input, directly
// generate a cross-lane VPERMD instruction.
- if (isSingleInputShuffleMask(Mask)) {
+ if (V2.isUndef()) {
SDValue VPermMask[8];
for (int i = 0; i < 8; ++i)
VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
: DAG.getConstant(Mask[i], DL, MVT::i32);
- return DAG.getNode(
- X86ISD::VPERMV, DL, MVT::v8i32,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
+ return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32,
+ DAG.getBuildVector(MVT::v8i32, DL, VPermMask), V1);
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
@@ -10903,16 +11533,14 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
-static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV16I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
- assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
+ assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
@@ -10922,7 +11550,7 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return ZExt;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
@@ -10936,8 +11564,8 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return V;
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
@@ -10945,7 +11573,13 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
return Rotate;
- if (isSingleInputShuffleMask(Mask)) {
+  // Try to create an in-lane repeating shuffle mask and then shuffle the
+  // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
+ if (V2.isUndef()) {
// There are no generalized cross-lane shuffle operations available on i16
// element types.
if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
@@ -10960,26 +11594,12 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return lowerV8I16GeneralSingleInputVectorShuffle(
DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
}
-
- SDValue PSHUFBMask[32];
- for (int i = 0; i < 16; ++i) {
- if (Mask[i] == -1) {
- PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
- continue;
- }
-
- int M = i < 8 ? Mask[i] : Mask[i] - 8;
- assert(M >= 0 && M < 8 && "Invalid single-input mask!");
- PSHUFBMask[2 * i] = DAG.getConstant(2 * M, DL, MVT::i8);
- PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, DL, MVT::i8);
- }
- return DAG.getBitcast(MVT::v16i16,
- DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8,
- DAG.getBitcast(MVT::v32i8, V1),
- DAG.getNode(ISD::BUILD_VECTOR, DL,
- MVT::v32i8, PSHUFBMask)));
}
+ if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1,
+ V2, Subtarget, DAG))
+ return PSHUFB;
+
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
@@ -10994,16 +11614,14 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
-static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV32I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
- assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
+ assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
@@ -11013,7 +11631,7 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return ZExt;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
@@ -11027,8 +11645,8 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return V;
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
@@ -11036,25 +11654,21 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
return Rotate;
- if (isSingleInputShuffleMask(Mask)) {
- // There are no generalized cross-lane shuffle operations available on i8
- // element types.
- if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
- return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
- Mask, DAG);
+  // Try to create an in-lane repeating shuffle mask and then shuffle the
+  // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
+ return V;
- SDValue PSHUFBMask[32];
- for (int i = 0; i < 32; ++i)
- PSHUFBMask[i] =
- Mask[i] < 0
- ? DAG.getUNDEF(MVT::i8)
- : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, DL,
- MVT::i8);
+ // There are no generalized cross-lane shuffle operations available on i8
+ // element types.
+ if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
+ return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2, Mask,
+ DAG);
- return DAG.getNode(
- X86ISD::PSHUFB, DL, MVT::v32i8, V1,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
- }
+ if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1,
+ V2, Subtarget, DAG))
+ return PSHUFB;
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
@@ -11071,19 +11685,14 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
-static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- MVT VT, const X86Subtarget *Subtarget,
+static SDValue lower256BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ MVT VT, SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
-
// If we have a single input to the zero element, insert that into V1 if we
// can do so cheaply.
int NumElts = VT.getVectorNumElements();
- int NumV2Elements = std::count_if(Mask.begin(), Mask.end(), [NumElts](int M) {
- return M >= NumElts;
- });
+ int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
if (NumV2Elements == 1 && Mask[0] >= NumElts)
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
@@ -11101,11 +11710,17 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// essentially *zero* ability to manipulate a 256-bit vector with integer
// types. Since we'll use floating point types there eventually, just
// immediately cast everything to a float and operate entirely in that domain.
- if (VT.isInteger() && !Subtarget->hasAVX2()) {
+ if (VT.isInteger() && !Subtarget.hasAVX2()) {
int ElementBits = VT.getScalarSizeInBits();
- if (ElementBits < 32)
- // No floating point type available, decompose into 128-bit vectors.
+ if (ElementBits < 32) {
+      // No floating point type available; if we can't use the bit operations
+      // for masking/blending, then decompose into 128-bit vectors.
+ if (SDValue V = lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, DAG))
+ return V;
+ if (SDValue V = lowerVectorShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
+ return V;
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
+ }
MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
VT.getVectorNumElements());
@@ -11116,17 +11731,17 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
switch (VT.SimpleTy) {
case MVT::v4f64:
- return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV4F64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v4i64:
- return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV4I64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v8f32:
- return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8F32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v8i32:
- return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8I32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v16i16:
- return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV16I16VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v32i8:
- return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV32I8VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Not a valid 256-bit x86 vector type!");
@@ -11134,21 +11749,37 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
/// \brief Try to lower a vector shuffle as 128-bit shuffles.
-static SDValue lowerV4X128VectorShuffle(SDLoc DL, MVT VT,
- ArrayRef<int> Mask,
- SDValue V1, SDValue V2,
- SelectionDAG &DAG) {
+static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT,
+ ArrayRef<int> Mask, SDValue V1,
+ SDValue V2, SelectionDAG &DAG) {
assert(VT.getScalarSizeInBits() == 64 &&
"Unexpected element type size for 128bit shuffle.");
// Handling a 256-bit vector requires VLX, and the function
// lowerV2X128VectorShuffle() is most probably a better solution.
- assert(VT.is512BitVector() && "Unexpected vector size for 128bit shuffle.");
+ assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
SmallVector<int, 4> WidenedMask;
if (!canWidenShuffleElements(Mask, WidenedMask))
return SDValue();
+ SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
+  // Ensure the elements come from the same Op.
+  int MaxOp1Index = VT.getVectorNumElements() / 2 - 1;
+ for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
+ if (WidenedMask[i] == SM_SentinelZero)
+ return SDValue();
+ if (WidenedMask[i] == SM_SentinelUndef)
+ continue;
+
+ SDValue Op = WidenedMask[i] > MaxOp1Index ? V2 : V1;
+ unsigned OpIndex = (i < Size/2) ? 0 : 1;
+ if (Ops[OpIndex].isUndef())
+ Ops[OpIndex] = Op;
+ else if (Ops[OpIndex] != Op)
+ return SDValue();
+ }
+
// Form a 128-bit permutation.
// Convert the 64-bit shuffle mask selection values into 128-bit selection
// bits defined by a vshuf64x2 instruction's immediate control byte.
@@ -11156,19 +11787,16 @@ static SDValue lowerV4X128VectorShuffle(SDLoc DL, MVT VT,
unsigned ControlBitsNum = WidenedMask.size() / 2;
for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
- if (WidenedMask[i] == SM_SentinelZero)
- return SDValue();
-
// Use first element in place of undef mask.
Imm = (WidenedMask[i] == SM_SentinelUndef) ? 0 : WidenedMask[i];
PermMask |= (Imm % WidenedMask.size()) << (i * ControlBitsNum);
}
- return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
+ return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
DAG.getConstant(PermMask, DL, MVT::i8));
}
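For illustration, a standalone sketch (not part of the patch) of the immediate packing performed by the loop above: each widened 128-bit lane contributes ControlBitsNum selection bits, taken modulo the widened mask size. With a v8f64 widened mask of {0, 1, 4, 5} this yields 0x44.

  #include <cassert>

  int main() {
    const int WidenedMask[4] = {0, 1, 4, 5}; // 128-bit lanes: V1[0], V1[1], V2[0], V2[1]
    const unsigned ControlBitsNum = 4 / 2;   // two selection bits per lane
    unsigned PermMask = 0;
    for (int i = 0; i != 4; ++i)
      PermMask |= (WidenedMask[i] % 4) << (i * ControlBitsNum);
    assert(PermMask == 0x44);                // 0b01'00'01'00
    return 0;
  }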
-static SDValue lowerVectorShuffleWithPERMV(SDLoc DL, MVT VT,
+static SDValue lowerVectorShuffleWithPERMV(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
@@ -11178,23 +11806,43 @@ static SDValue lowerVectorShuffleWithPERMV(SDLoc DL, MVT VT,
MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
- if (isSingleInputShuffleMask(Mask))
+ if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
}
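For reference, a sketch (not part of the patch, and only an assumption about the index encoding) of how a two-source variable permute interprets the unchanged mask handed to VPERMV3 above: entries below the element count address V1 and the remaining entries address V2.

  // Resolve one lane of a two-source permute, assuming NumElts lanes per input.
  template <typename T>
  static T resolveTwoSourceLane(const T *V1, const T *V2, int MaskEntry,
                                int NumElts) {
    return MaskEntry < NumElts ? V1[MaskEntry] : V2[MaskEntry - NumElts];
  }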
/// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
-static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
+ if (V2.isUndef()) {
+ // Use low duplicate instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
+ return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
+
+ if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
+ // Non-half-crossing single input shuffles can be lowered with an
+ // interleaved permutation.
+ unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
+ ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
+ ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
+ ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
+ return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
+ DAG.getConstant(VPERMILPMask, DL, MVT::i8));
+ }
+
+ SmallVector<int, 4> RepeatedMask;
+ if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
+ return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
+ getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
+ }
+
if (SDValue Shuf128 =
lowerV4X128VectorShuffle(DL, MVT::v8f64, Mask, V1, V2, DAG))
return Shuf128;
@@ -11203,42 +11851,90 @@ static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
lowerVectorShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
return Unpck;
+ // Check if the blend happens to exactly fit that of SHUFPD.
+ if (SDValue Op =
+ lowerVectorShuffleWithSHUFPD(DL, MVT::v8f64, Mask, V1, V2, DAG))
+ return Op;
+
return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
}
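For illustration, a sketch (not part of the patch) of the non-lane-crossing immediate built above: bit i is set when output element i takes the odd (high) element of its 128-bit pair, so the odd-duplicate mask {1,1,3,3,5,5,7,7} packs to 0xFF and the identity mask packs to 0xAA.

  #include <cassert>

  static unsigned packVPermilPdImm(const int (&Mask)[8]) {
    unsigned Imm = 0;
    for (int i = 0; i != 8; ++i)
      Imm |= unsigned(Mask[i] == (i | 1)) << i; // (i | 1) is the high element of pair i / 2
    return Imm;
  }

  int main() {
    const int OddDup[8] = {1, 1, 3, 3, 5, 5, 7, 7};
    const int Ident[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    assert(packVPermilPdImm(OddDup) == 0xFF);
    assert(packVPermilPdImm(Ident) == 0xAA);
    return 0;
  }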
/// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
-static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
- if (SDValue Unpck =
- lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
- return Unpck;
+ // If the shuffle mask is repeated in each 128-bit lane, we have many more
+ // options to efficiently lower the shuffle.
+ SmallVector<int, 4> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
+ assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
+
+ // Use even/odd duplicate instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
+ return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
+ if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
+ return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
+
+ if (V2.isUndef())
+ return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
+ getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (SDValue Unpck =
+ lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
+ return Unpck;
+
+ // Otherwise, fall back to a SHUFPS sequence.
+    return lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2,
+                                        DAG);
+ }
return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
}
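For illustration, a simplified check (not part of the patch; single-input and undef-free by assumption) of what "repeated in each 128-bit lane" means for the v16f32 path above: every lane applies the same four-element pattern, e.g. {0,0,2,2, 4,4,6,6, 8,8,10,10, 12,12,14,14} repeats {0,0,2,2} and is caught by the MOVSLDUP case.

  // Returns true and fills Repeated when a 16-element, single-input mask uses
  // the same 4-element pattern in every 128-bit lane.
  static bool isLaneRepeated4x(const int (&Mask)[16], int (&Repeated)[4]) {
    for (int Elt = 0; Elt != 4; ++Elt)
      Repeated[Elt] = Mask[Elt];               // take the pattern from lane 0
    for (int i = 4; i != 16; ++i)
      if (Mask[i] - (i / 4) * 4 != Repeated[i % 4])
        return false;                          // some lane deviates
    return true;
  }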
/// \brief Handle lowering of 8-lane 64-bit integer shuffles.
-static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (SDValue Shuf128 =
lowerV4X128VectorShuffle(DL, MVT::v8i64, Mask, V1, V2, DAG))
return Shuf128;
+ if (V2.isUndef()) {
+ // When the shuffle is mirrored between the 128-bit lanes of the unit, we
+ // can use lower latency instructions that will operate on all four
+ // 128-bit lanes.
+ SmallVector<int, 2> Repeated128Mask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
+ SmallVector<int, 4> PSHUFDMask;
+ scaleShuffleMask(2, Repeated128Mask, PSHUFDMask);
+ return DAG.getBitcast(
+ MVT::v8i64,
+ DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
+ DAG.getBitcast(MVT::v16i32, V1),
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
+ }
+
+ SmallVector<int, 4> Repeated256Mask;
+ if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
+ return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
+ getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
+ }
+
+ // Try to use shift instructions.
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
+ Subtarget, DAG))
+ return Shift;
+
if (SDValue Unpck =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
return Unpck;
@@ -11247,49 +11943,111 @@ static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
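For illustration, a sketch (not part of the patch) of the mask scaling assumed above: splitting every 64-bit selection into two consecutive 32-bit selections turns the per-lane swap {1, 0} into {2, 3, 0, 1}, which getV4X86ShuffleImm8ForMask would then pack, two bits per element, into the familiar pshufd immediate 0x4E.

  #include <vector>

  // Scale a shuffle mask by Scale, assuming each wide element expands to Scale
  // consecutive narrow elements; negative sentinels are left untouched.
  static std::vector<int> scaleMask(int Scale, const std::vector<int> &Mask) {
    std::vector<int> Scaled;
    for (int M : Mask)
      for (int i = 0; i != Scale; ++i)
        Scaled.push_back(M < 0 ? M : M * Scale + i);
    return Scaled; // scaleMask(2, {1, 0}) == {2, 3, 0, 1}
  }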
/// \brief Handle lowering of 16-lane 32-bit integer shuffles.
-static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV16I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
- if (SDValue Unpck =
- lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
- return Unpck;
+ // If the shuffle mask is repeated in each 128-bit lane we can use more
+ // efficient instructions that mirror the shuffles across the four 128-bit
+ // lanes.
+ SmallVector<int, 4> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask)) {
+ assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
+ if (V2.isUndef())
+ return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
+ getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (SDValue V =
+ lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
+ return V;
+ }
+
+ // Try to use shift instructions.
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
+ Subtarget, DAG))
+ return Shift;
+
+ // Try to use byte rotation instructions.
+ if (Subtarget.hasBWI())
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
+ DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
+ return Rotate;
return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 32-lane 16-bit integer shuffles.
-static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV32I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
- assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
+ assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (SDValue V =
+ lowerVectorShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
+ return V;
+
+ // Try to use shift instructions.
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
+ Subtarget, DAG))
+ return Shift;
+
+ // Try to use byte rotation instructions.
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
+ DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG))
+ return Rotate;
+
+ if (V2.isUndef()) {
+ SmallVector<int, 8> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
+ // As this is a single-input shuffle, the repeated mask should be
+ // a strictly valid v8i16 mask that we can pass through to the v8i16
+ // lowering to handle even the v32 case.
+ return lowerV8I16GeneralSingleInputVectorShuffle(
+ DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
+ }
+ }
return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 64-lane 8-bit integer shuffles.
-static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV64I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
- assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
+ assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (SDValue V =
+ lowerVectorShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
+ return V;
+
+ // Try to use shift instructions.
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
+ Subtarget, DAG))
+ return Shift;
+
+ // Try to use byte rotation instructions.
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
+ DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
+ return Rotate;
+
+ if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1,
+ V2, Subtarget, DAG))
+ return PSHUFB;
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
@@ -11300,61 +12058,50 @@ static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
-static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- MVT VT, const X86Subtarget *Subtarget,
+static SDValue lower512BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ MVT VT, SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
- assert(Subtarget->hasAVX512() &&
+ assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/ basic ISA!");
// Check for being able to broadcast a single element.
if (SDValue Broadcast =
- lowerVectorShuffleAsBroadcast(DL, VT, V1, Mask, Subtarget, DAG))
+ lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
- // Dispatch to each element type for lowering. If we don't have supprot for
+ // Dispatch to each element type for lowering. If we don't have support for
// specific element type shuffles at 512 bits, immediately split them and
// lower them. Each lowering routine of a given type is allowed to assume that
// the requisite ISA extensions for that element type are available.
switch (VT.SimpleTy) {
case MVT::v8f64:
- return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8F64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v16f32:
- return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV16F32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v8i64:
- return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8I64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v16i32:
- return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV16I32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v32i16:
- if (Subtarget->hasBWI())
- return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
- break;
+ return lowerV32I16VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v64i8:
- if (Subtarget->hasBWI())
- return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
- break;
+ return lowerV64I8VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Not a valid 512-bit x86 vector type!");
}
-
- // Otherwise fall back on splitting.
- return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}
// Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the masks.
// The only way to shuffle bits is to sign-extend the mask vector to a SIMD
// vector, shuffle, and then truncate it back.
-static SDValue lower1BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- MVT VT, const X86Subtarget *Subtarget,
+static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ MVT VT, SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
- assert(Subtarget->hasAVX512() &&
+ assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/o basic ISA!");
MVT ExtVT;
switch (VT.SimpleTy) {
@@ -11405,7 +12152,7 @@ static SDValue lower1BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
-static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
ArrayRef<int> Mask = SVOp->getMask();
@@ -11413,14 +12160,14 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
SDValue V2 = Op.getOperand(1);
MVT VT = Op.getSimpleValueType();
int NumElements = VT.getVectorNumElements();
- SDLoc dl(Op);
+ SDLoc DL(Op);
bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
"Can't lower MMX shuffles");
- bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
- bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
+ bool V1IsUndef = V1.isUndef();
+ bool V2IsUndef = V2.isUndef();
if (V1IsUndef && V2IsUndef)
return DAG.getUNDEF(VT);
@@ -11440,7 +12187,7 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
for (int &M : NewMask)
if (M >= NumElements)
M = -1;
- return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
+ return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
}
// We actually see shuffles that are entirely re-arrangements of a set of
@@ -11448,7 +12195,7 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
// simple ones. Directly lower these as a buildvector of zeros.
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
if (Zeroable.all())
- return getZeroVector(VT, Subtarget, DAG, dl);
+ return getZeroVector(VT, Subtarget, DAG, DL);
// Try to collapse shuffles into using a vector type with fewer elements but
// wider element types. We cap this to not form integers or floating point
@@ -11467,12 +12214,12 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
V1 = DAG.getBitcast(NewVT, V1);
V2 = DAG.getBitcast(NewVT, V2);
return DAG.getBitcast(
- VT, DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
+ VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
}
}
int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
- for (int M : SVOp->getMask())
+ for (int M : Mask)
if (M < 0)
++NumUndefElements;
else if (M < NumElements)
@@ -11486,6 +12233,9 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
if (NumV2Elements > NumV1Elements)
return DAG.getCommutedVectorShuffle(*SVOp);
+ assert(NumV1Elements > 0 && "No V1 indices");
+ assert((NumV2Elements > 0 || V2IsUndef) && "V2 not undef, but not used");
+
  // When the number of V1 and V2 elements is the same, try to minimize the
// number of uses of V2 in the low half of the vector. When that is tied,
// ensure that the sum of indices for V1 is equal to or lower than the sum
@@ -11493,28 +12243,28 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
// indices for V1 is lower than the number of odd indices for V2.
if (NumV1Elements == NumV2Elements) {
int LowV1Elements = 0, LowV2Elements = 0;
- for (int M : SVOp->getMask().slice(0, NumElements / 2))
+ for (int M : Mask.slice(0, NumElements / 2))
if (M >= NumElements)
++LowV2Elements;
else if (M >= 0)
++LowV1Elements;
- if (LowV2Elements > LowV1Elements) {
+ if (LowV2Elements > LowV1Elements)
return DAG.getCommutedVectorShuffle(*SVOp);
- } else if (LowV2Elements == LowV1Elements) {
+ if (LowV2Elements == LowV1Elements) {
int SumV1Indices = 0, SumV2Indices = 0;
- for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
- if (SVOp->getMask()[i] >= NumElements)
+ for (int i = 0, Size = Mask.size(); i < Size; ++i)
+ if (Mask[i] >= NumElements)
SumV2Indices += i;
- else if (SVOp->getMask()[i] >= 0)
+ else if (Mask[i] >= 0)
SumV1Indices += i;
- if (SumV2Indices < SumV1Indices) {
+ if (SumV2Indices < SumV1Indices)
return DAG.getCommutedVectorShuffle(*SVOp);
- } else if (SumV2Indices == SumV1Indices) {
+ if (SumV2Indices == SumV1Indices) {
int NumV1OddIndices = 0, NumV2OddIndices = 0;
- for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
- if (SVOp->getMask()[i] >= NumElements)
+ for (int i = 0, Size = Mask.size(); i < Size; ++i)
+ if (Mask[i] >= NumElements)
NumV2OddIndices += i % 2;
- else if (SVOp->getMask()[i] >= 0)
+ else if (Mask[i] >= 0)
NumV1OddIndices += i % 2;
if (NumV2OddIndices < NumV1OddIndices)
return DAG.getCommutedVectorShuffle(*SVOp);
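For illustration, the commute heuristic spelled out across the two hunks above, condensed into one predicate (not part of the patch; only meaningful once the V1 and V2 element counts are already known to be equal):

  // Decide whether to swap V1/V2 given the statistics gathered from the mask.
  static bool shouldCommuteOperands(int LowV1Elements, int LowV2Elements,
                                    int SumV1Indices, int SumV2Indices,
                                    int NumV1OddIndices, int NumV2OddIndices) {
    if (LowV2Elements != LowV1Elements)
      return LowV2Elements > LowV1Elements;   // minimize V2 uses in the low half
    if (SumV2Indices != SumV1Indices)
      return SumV2Indices < SumV1Indices;     // then keep V1 indices at least as low
    return NumV2OddIndices < NumV1OddIndices; // finally prefer V1 in odd positions
  }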
@@ -11524,69 +12274,23 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
// For each vector width, delegate to a specialized lowering routine.
if (VT.is128BitVector())
- return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
+ return lower128BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
if (VT.is256BitVector())
- return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
+ return lower256BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
if (VT.is512BitVector())
- return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
+ return lower512BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
if (Is1BitVector)
- return lower1BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
- llvm_unreachable("Unimplemented!");
-}
+ return lower1BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
-// This function assumes its argument is a BUILD_VECTOR of constants or
-// undef SDNodes. i.e: ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
-// true.
-static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
- unsigned &MaskValue) {
- MaskValue = 0;
- unsigned NumElems = BuildVector->getNumOperands();
-
- // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
- // We don't handle the >2 lanes case right now.
- unsigned NumLanes = (NumElems - 1) / 8 + 1;
- if (NumLanes > 2)
- return false;
-
- unsigned NumElemsInLane = NumElems / NumLanes;
-
- // Blend for v16i16 should be symmetric for the both lanes.
- for (unsigned i = 0; i < NumElemsInLane; ++i) {
- SDValue EltCond = BuildVector->getOperand(i);
- SDValue SndLaneEltCond =
- (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
-
- int Lane1Cond = -1, Lane2Cond = -1;
- if (isa<ConstantSDNode>(EltCond))
- Lane1Cond = !isNullConstant(EltCond);
- if (isa<ConstantSDNode>(SndLaneEltCond))
- Lane2Cond = !isNullConstant(SndLaneEltCond);
-
- unsigned LaneMask = 0;
- if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
- // Lane1Cond != 0, means we want the first argument.
- // Lane1Cond == 0, means we want the second argument.
- // The encoding of this argument is 0 for the first argument, 1
- // for the second. Therefore, invert the condition.
- LaneMask = !Lane1Cond << i;
- else if (Lane1Cond < 0)
- LaneMask = !Lane2Cond << i;
- else
- return false;
-
- MaskValue |= LaneMask;
- if (NumLanes == 2)
- MaskValue |= LaneMask << NumElemsInLane;
- }
- return true;
+ llvm_unreachable("Unimplemented!");
}
/// \brief Try to lower a VSELECT instruction to a vector shuffle.
static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Cond = Op.getOperand(0);
SDValue LHS = Op.getOperand(1);
@@ -11624,7 +12328,7 @@ SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
return BlendOp;
// Variable blends are only legal from SSE4.1 onward.
- if (!Subtarget->hasSSE41())
+ if (!Subtarget.hasSSE41())
return SDValue();
// Only some types will be legal on some subtargets. If we can emit a legal
@@ -11637,7 +12341,7 @@ SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
case MVT::v32i8:
// The byte blends for AVX vectors were introduced only in AVX2.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return Op;
return SDValue();
@@ -11645,7 +12349,7 @@ SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
case MVT::v8i16:
case MVT::v16i16:
// AVX-512 BWI and VLX features support VSELECT with i16 elements.
- if (Subtarget->hasBWI() && Subtarget->hasVLX())
+ if (Subtarget.hasBWI() && Subtarget.hasVLX())
return Op;
// FIXME: We should custom lower this by fixing the condition and using i8
@@ -11723,7 +12427,7 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const
MVT EltVT = Op.getSimpleValueType();
assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
- assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
+ assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
"Unexpected vector type in ExtractBitFromMaskVector");
// variable index can't be handled in mask registers,
@@ -11737,10 +12441,15 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const
}
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
- const TargetRegisterClass* rc = getRegClassFor(VecVT);
- if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
- rc = getRegClassFor(MVT::v16i1);
- unsigned MaxSift = rc->getSize()*8 - 1;
+ if (!Subtarget.hasDQI() && (VecVT.getVectorNumElements() <= 8)) {
+ // Use kshiftlw/rw instruction.
+ VecVT = MVT::v16i1;
+ Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT,
+ DAG.getUNDEF(VecVT),
+ Vec,
+ DAG.getIntPtrConstant(0, dl));
+ }
+ unsigned MaxSift = VecVT.getVectorNumElements() - 1;
Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
DAG.getConstant(MaxSift - IdxVal, dl, MVT::i8));
Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
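For illustration, a scalar analogue (not part of the patch) of the kshift pair above: shifting the widened v16i1 mask left by MaxSift - IdxVal and then right by MaxSift leaves the requested bit in position 0.

  #include <cassert>
  #include <cstdint>

  static uint16_t extractMaskBit(uint16_t Mask, unsigned IdxVal) {
    const unsigned MaxSift = 15;                              // lanes - 1 for v16i1
    uint16_t Shifted = uint16_t(Mask << (MaxSift - IdxVal));  // kshiftlw
    return uint16_t(Shifted >> MaxSift);                      // kshiftrw
  }

  int main() {
    assert(extractMaskBit(0b0000000000001000, 3) == 1);
    assert(extractMaskBit(0b0000000000001000, 2) == 0);
    return 0;
  }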
@@ -11762,7 +12471,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
if (!isa<ConstantSDNode>(Idx)) {
if (VecVT.is512BitVector() ||
- (VecVT.is256BitVector() && Subtarget->hasInt256() &&
+ (VecVT.is256BitVector() && Subtarget.hasInt256() &&
VecVT.getVectorElementType().getSizeInBits() == 32)) {
MVT MaskEltVT =
@@ -11782,13 +12491,13 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
return SDValue();
}
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+
// If this is a 256-bit vector result, first extract the 128-bit vector and
// then extract the element from the 128-bit vector.
if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
-
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
// Get the 128-bit vector.
- Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
+ Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
MVT EltVT = VecVT.getVectorElementType();
unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
@@ -11803,38 +12512,33 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
assert(VecVT.is128BitVector() && "Unexpected vector length");
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
return Res;
MVT VT = Op.getSimpleValueType();
// TODO: handle v16i8.
if (VT.getSizeInBits() == 16) {
- SDValue Vec = Op.getOperand(0);
- if (isNullConstant(Op.getOperand(1)))
+ if (IdxVal == 0)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
- DAG.getBitcast(MVT::v4i32, Vec),
- Op.getOperand(1)));
+ DAG.getBitcast(MVT::v4i32, Vec), Idx));
+
    // Transform it so it matches pextrw, which produces a 32-bit result.
MVT EltVT = MVT::i32;
- SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
- Op.getOperand(0), Op.getOperand(1));
+ SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, Vec, Idx);
SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
}
if (VT.getSizeInBits() == 32) {
- unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- if (Idx == 0)
+ if (IdxVal == 0)
return Op;
// SHUFPS the element to the lowest double word, then movss.
- int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
- MVT VVT = Op.getOperand(0).getSimpleValueType();
- SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
- DAG.getUNDEF(VVT), Mask);
+ int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
+ Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
DAG.getIntPtrConstant(0, dl));
}
@@ -11843,16 +12547,14 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
// FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
// to match extract_elt for f64.
- if (isNullConstant(Op.getOperand(1)))
+ if (IdxVal == 0)
return Op;
// UNPCKHPD the element to the lowest double word, then movsd.
// Note if the lower 64 bits of the result of the UNPCKHPD is then stored
// to a f64mem, the whole operation is folded into a single MOVHPDmr.
int Mask[2] = { 1, -1 };
- MVT VVT = Op.getOperand(0).getSimpleValueType();
- SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
- DAG.getUNDEF(VVT), Mask);
+ Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
DAG.getIntPtrConstant(0, dl));
}
@@ -11886,7 +12588,7 @@ X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
if (IdxVal)
EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
DAG.getConstant(IdxVal, dl, MVT::i8));
- if (Vec.getOpcode() == ISD::UNDEF)
+ if (Vec.isUndef())
return EltInVec;
return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
}
@@ -11895,6 +12597,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
+ unsigned NumElts = VT.getVectorNumElements();
if (EltVT == MVT::i1)
return InsertBitToMaskVector(Op, DAG);
@@ -11908,6 +12611,19 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
auto *N2C = cast<ConstantSDNode>(N2);
unsigned IdxVal = N2C->getZExtValue();
+  // If we are clearing out an element, we do this more efficiently with a
+ // blend shuffle than a costly integer insertion.
+ // TODO: would other rematerializable values (e.g. allbits) benefit as well?
+ // TODO: pre-SSE41 targets will tend to use bit masking - this could still
+ // be beneficial if we are inserting several zeros and can combine the masks.
+ if (X86::isZeroNode(N1) && Subtarget.hasSSE41() && NumElts <= 8) {
+ SmallVector<int, 8> ClearMask;
+ for (unsigned i = 0; i != NumElts; ++i)
+ ClearMask.push_back(i == IdxVal ? i + NumElts : i);
+ SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, dl);
+ return DAG.getVectorShuffle(VT, dl, N0, ZeroVector, ClearMask);
+ }
+
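For illustration, the blend mask construction just added, as a standalone sketch (not part of the patch): for v4i32 and IdxVal == 2 it produces {0, 1, 6, 3}, i.e. every lane is kept from N0 except lane 2, which is taken from the zero vector.

  #include <vector>

  static std::vector<int> buildClearMask(unsigned NumElts, unsigned IdxVal) {
    std::vector<int> ClearMask;
    for (unsigned i = 0; i != NumElts; ++i)
      ClearMask.push_back(i == IdxVal ? int(i + NumElts) : int(i)); // zero lane from V2
    return ClearMask; // buildClearMask(4, 2) == {0, 1, 6, 3}
  }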
// If the vector is wider than 128 bits, extract the 128-bit subvector, insert
// into that, and then insert the subvector back into the result.
if (VT.is256BitVector() || VT.is512BitVector()) {
@@ -11917,8 +12633,8 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
// TODO: It is worthwhile to cast integer to floating point and back
// and incur a domain crossing penalty if that's what we'll end up
// doing anyway after extracting to a 128-bit vector.
- if ((Subtarget->hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
- (Subtarget->hasAVX2() && EltVT == MVT::i32)) {
+ if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
+ (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
N2 = DAG.getIntPtrConstant(1, dl);
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
@@ -11926,7 +12642,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
}
// Get the desired 128-bit vector chunk.
- SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
+ SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
// Insert the element into the desired chunk.
unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
@@ -11938,11 +12654,11 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
DAG.getConstant(IdxIn128, dl, MVT::i32));
// Insert the changed part back into the bigger vector
- return Insert128BitVector(N0, V, IdxVal, DAG, dl);
+ return insert128BitVector(N0, V, IdxVal, DAG, dl);
}
assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
unsigned Opc;
if (VT == MVT::v8i16) {
@@ -12026,7 +12742,7 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
// Insert the 128-bit vector.
- return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
+ return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
}
if (OpVT == MVT::v1i64 &&
@@ -12042,7 +12758,7 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
// a simple subregister reference or explicit instructions to grab
// upper bits of a vector.
-static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
SDValue In = Op.getOperand(0);
@@ -12051,15 +12767,15 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
MVT ResVT = Op.getSimpleValueType();
MVT InVT = In.getSimpleValueType();
- if (Subtarget->hasFp256()) {
+ if (Subtarget.hasFp256()) {
if (ResVT.is128BitVector() &&
(InVT.is256BitVector() || InVT.is512BitVector()) &&
isa<ConstantSDNode>(Idx)) {
- return Extract128BitVector(In, IdxVal, DAG, dl);
+ return extract128BitVector(In, IdxVal, DAG, dl);
}
if (ResVT.is256BitVector() && InVT.is512BitVector() &&
isa<ConstantSDNode>(Idx)) {
- return Extract256BitVector(In, IdxVal, DAG, dl);
+ return extract256BitVector(In, IdxVal, DAG, dl);
}
}
return SDValue();
@@ -12068,9 +12784,9 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
-static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- if (!Subtarget->hasAVX())
+ if (!Subtarget.hasAVX())
return SDValue();
SDLoc dl(Op);
@@ -12094,16 +12810,13 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
OpVT.is256BitVector() && SubVecVT.is128BitVector()) {
auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2));
if (Idx2 && Idx2->getZExtValue() == 0) {
- SDValue SubVec2 = Vec.getOperand(1);
- // If needed, look through a bitcast to get to the load.
- if (SubVec2.getNode() && SubVec2.getOpcode() == ISD::BITCAST)
- SubVec2 = SubVec2.getOperand(0);
-
+ // If needed, look through bitcasts to get to the load.
+ SDValue SubVec2 = peekThroughBitcasts(Vec.getOperand(1));
if (auto *FirstLd = dyn_cast<LoadSDNode>(SubVec2)) {
bool Fast;
unsigned Alignment = FirstLd->getAlignment();
unsigned AS = FirstLd->getAddressSpace();
- const X86TargetLowering *TLI = Subtarget->getTargetLowering();
+ const X86TargetLowering *TLI = Subtarget.getTargetLowering();
if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
OpVT, AS, Alignment, &Fast) && Fast) {
SDValue Ops[] = { SubVec2, SubVec };
@@ -12116,13 +12829,13 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
SubVecVT.is128BitVector())
- return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
+ return insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
- return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
+ return insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
if (OpVT.getVectorElementType() == MVT::i1)
- return Insert1BitVector(Op, DAG);
+ return insert1BitVector(Op, DAG, Subtarget);
return SDValue();
}
@@ -12139,17 +12852,13 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- unsigned char OpFlag = 0;
+ unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
unsigned WrapperKind = X86ISD::Wrapper;
CodeModel::Model M = DAG.getTarget().getCodeModel();
- if (Subtarget->isPICStyleRIPRel() &&
+ if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
WrapperKind = X86ISD::WrapperRIP;
- else if (Subtarget->isPICStyleGOT())
- OpFlag = X86II::MO_GOTOFF;
- else if (Subtarget->isPICStyleStubPIC())
- OpFlag = X86II::MO_PIC_BASE_OFFSET;
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetConstantPool(
@@ -12171,17 +12880,13 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- unsigned char OpFlag = 0;
+ unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
unsigned WrapperKind = X86ISD::Wrapper;
CodeModel::Model M = DAG.getTarget().getCodeModel();
- if (Subtarget->isPICStyleRIPRel() &&
+ if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
WrapperKind = X86ISD::WrapperRIP;
- else if (Subtarget->isPICStyleGOT())
- OpFlag = X86II::MO_GOTOFF;
- else if (Subtarget->isPICStyleStubPIC())
- OpFlag = X86II::MO_PIC_BASE_OFFSET;
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
@@ -12203,22 +12908,14 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- unsigned char OpFlag = 0;
+ const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
unsigned WrapperKind = X86ISD::Wrapper;
CodeModel::Model M = DAG.getTarget().getCodeModel();
- if (Subtarget->isPICStyleRIPRel() &&
- (M == CodeModel::Small || M == CodeModel::Kernel)) {
- if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
- OpFlag = X86II::MO_GOTPCREL;
+ if (Subtarget.isPICStyleRIPRel() &&
+ (M == CodeModel::Small || M == CodeModel::Kernel))
WrapperKind = X86ISD::WrapperRIP;
- } else if (Subtarget->isPICStyleGOT()) {
- OpFlag = X86II::MO_GOT;
- } else if (Subtarget->isPICStyleStubPIC()) {
- OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
- } else if (Subtarget->isPICStyleStubNoDynamic()) {
- OpFlag = X86II::MO_DARWIN_NONLAZY;
- }
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);
@@ -12227,8 +12924,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
- if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
- !Subtarget->is64Bit()) {
+ if (isPositionIndependent() && !Subtarget.is64Bit()) {
Result =
DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
@@ -12238,8 +12934,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
// load.
if (isGlobalStubReference(OpFlag))
Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
return Result;
}
@@ -12248,7 +12943,7 @@ SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// Create the TargetBlockAddressAddress node.
unsigned char OpFlags =
- Subtarget->ClassifyBlockAddressReference();
+ Subtarget.classifyBlockAddressReference();
CodeModel::Model M = DAG.getTarget().getCodeModel();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
@@ -12256,7 +12951,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
- if (Subtarget->isPICStyleRIPRel() &&
+ if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
Result = DAG.getNode(X86ISD::WrapperRIP, dl, PtrVT, Result);
else
@@ -12271,13 +12966,12 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
return Result;
}
-SDValue
-X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
- int64_t Offset, SelectionDAG &DAG) const {
+SDValue X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
+ const SDLoc &dl, int64_t Offset,
+ SelectionDAG &DAG) const {
// Create the TargetGlobalAddress node, folding in the constant
// offset if it is legal.
- unsigned char OpFlags =
- Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
+ unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
CodeModel::Model M = DAG.getTarget().getCodeModel();
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result;
@@ -12290,7 +12984,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, OpFlags);
}
- if (Subtarget->isPICStyleRIPRel() &&
+ if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
Result = DAG.getNode(X86ISD::WrapperRIP, dl, PtrVT, Result);
else
@@ -12306,8 +13000,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
// load.
if (isGlobalStubReference(OpFlags))
Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
// If there was a non-zero offset that we didn't fold, create an explicit
// addition for it.
@@ -12429,7 +13122,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
SDValue ThreadPointer =
DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
- MachinePointerInfo(Ptr), false, false, false, 0);
+ MachinePointerInfo(Ptr));
unsigned char OperandFlags = 0;
// Most TLS accesses are not RIP relative, even on x86-64. One exception is
@@ -12464,8 +13157,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
}
Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
// The address of the thread local variable is the add of the thread
@@ -12478,45 +13170,40 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- // Cygwin uses emutls.
- // FIXME: It may be EmulatedTLS-generic also for X86-Android.
- if (Subtarget->isTargetWindowsCygwin())
+ if (DAG.getTarget().Options.EmulatedTLS)
return LowerToTLSEmulatedModel(GA, DAG);
const GlobalValue *GV = GA->getGlobal();
auto PtrVT = getPointerTy(DAG.getDataLayout());
+ bool PositionIndependent = isPositionIndependent();
- if (Subtarget->isTargetELF()) {
- if (DAG.getTarget().Options.EmulatedTLS)
- return LowerToTLSEmulatedModel(GA, DAG);
+ if (Subtarget.isTargetELF()) {
TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
switch (model) {
case TLSModel::GeneralDynamic:
- if (Subtarget->is64Bit())
+ if (Subtarget.is64Bit())
return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
case TLSModel::LocalDynamic:
return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
- Subtarget->is64Bit());
+ Subtarget.is64Bit());
case TLSModel::InitialExec:
case TLSModel::LocalExec:
- return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget->is64Bit(),
- DAG.getTarget().getRelocationModel() ==
- Reloc::PIC_);
+ return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
+ PositionIndependent);
}
llvm_unreachable("Unknown TLS model.");
}
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget.isTargetDarwin()) {
// Darwin only has one model of TLS. Lower to that.
unsigned char OpFlag = 0;
- unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
+ unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
X86ISD::WrapperRIP : X86ISD::Wrapper;
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
- !Subtarget->is64Bit();
+ bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
if (PIC32)
OpFlag = X86II::MO_TLVP_PIC_BASE;
else
@@ -12540,9 +13227,9 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, DL, true), DL);
SDValue Args[] = { Chain, Offset };
Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
- Chain =
- DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
- DAG.getIntPtrConstant(0, DL, true), SDValue(), DL);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
+ DAG.getIntPtrConstant(0, DL, true),
+ Chain.getValue(1), DL);
// TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
@@ -12550,12 +13237,13 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
// And our return value (tls address) is in the standard call return value
// location.
- unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+ unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
}
- if (Subtarget->isTargetKnownWindowsMSVC() ||
- Subtarget->isTargetWindowsGNU()) {
+ if (Subtarget.isTargetKnownWindowsMSVC() ||
+ Subtarget.isTargetWindowsItanium() ||
+ Subtarget.isTargetWindowsGNU()) {
// Just use the implicit TLS architecture
    // Need to generate something similar to:
// mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
@@ -12573,21 +13261,20 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
// Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
// %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
// use its literal value of 0x2C.
- Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
+ Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
? Type::getInt8PtrTy(*DAG.getContext(),
256)
: Type::getInt32PtrTy(*DAG.getContext(),
257));
- SDValue TlsArray = Subtarget->is64Bit()
+ SDValue TlsArray = Subtarget.is64Bit()
? DAG.getIntPtrConstant(0x58, dl)
- : (Subtarget->isTargetWindowsGNU()
+ : (Subtarget.isTargetWindowsGNU()
? DAG.getIntPtrConstant(0x2C, dl)
: DAG.getExternalSymbol("_tls_array", PtrVT));
SDValue ThreadPointer =
- DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr), false,
- false, false, 0);
+ DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
SDValue res;
if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
@@ -12595,13 +13282,11 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
} else {
// Load the _tls_index variable
SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
- if (Subtarget->is64Bit())
+ if (Subtarget.is64Bit())
IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
- MachinePointerInfo(), MVT::i32, false, false,
- false, 0);
+ MachinePointerInfo(), MVT::i32);
else
- IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo(), false,
- false, false, 0);
+ IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
auto &DL = DAG.getDataLayout();
SDValue Scale =
@@ -12611,8 +13296,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
}
- res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo(), false, false,
- false, 0);
+ res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
// Get the offset of start of .tls section
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
@@ -12628,7 +13312,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
llvm_unreachable("TLS not implemented for this target.");
}
-/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
+/// Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
@@ -12711,13 +13395,13 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
return Op;
if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
- Subtarget->is64Bit()) {
+ Subtarget.is64Bit()) {
return Op;
}
SDValue ValueToStore = Op.getOperand(0);
if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
- !Subtarget->is64Bit())
+ !Subtarget.is64Bit())
// Bitcasting to f64 here allows us to do a single 64-bit store from
// an SSE register, avoiding the store forwarding penalty that would come
// with two 32-bit stores.
@@ -12730,8 +13414,7 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
SDValue Chain = DAG.getStore(
DAG.getEntryNode(), dl, ValueToStore, StackSlot,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI), false,
- false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}
@@ -12789,14 +13472,13 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
Ops, Op.getValueType(), MMO);
Result = DAG.getLoad(
Op.getValueType(), DL, Chain, StackSlot,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
- false, false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
}
return Result;
}
-// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
+/// 64-bit unsigned integer to double expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
SelectionDAG &DAG) const {
  // This algorithm is not obvious. Here is what we're trying to output:
@@ -12837,20 +13519,20 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
SDValue CLod0 =
DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 16);
+ /* Alignment = */ 16);
SDValue Unpck1 =
getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
SDValue CLod1 =
DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 16);
+ /* Alignment = */ 16);
SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
// TODO: Are there any fast-math-flags to propagate here?
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
SDValue Result;
- if (Subtarget->hasSSE3()) {
+ if (Subtarget.hasSSE3()) {
// FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
} else {
@@ -12865,7 +13547,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
DAG.getIntPtrConstant(0, dl));
}
-// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
+/// 32-bit unsigned integer to float expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
@@ -12945,10 +13627,8 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
if (VecFloatVT != Op->getSimpleValueType(0))
return SDValue();
- unsigned NumElts = VecIntVT.getVectorNumElements();
assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
"Unsupported custom type");
- assert(NumElts <= 8 && "The size of the constant array must be fixed");
  // In the #ifdef/#else code, we have in common:
// - The vector of constants:
@@ -12958,24 +13638,12 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
// -- v >> 16
// Create the splat vector for 0x4b000000.
- SDValue CstLow = DAG.getConstant(0x4b000000, DL, MVT::i32);
- SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
- CstLow, CstLow, CstLow, CstLow};
- SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
- makeArrayRef(&CstLowArray[0], NumElts));
+ SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
// Create the splat vector for 0x53000000.
- SDValue CstHigh = DAG.getConstant(0x53000000, DL, MVT::i32);
- SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
- CstHigh, CstHigh, CstHigh, CstHigh};
- SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
- makeArrayRef(&CstHighArray[0], NumElts));
+ SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
// Create the right shift.
- SDValue CstShift = DAG.getConstant(16, DL, MVT::i32);
- SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
- CstShift, CstShift, CstShift, CstShift};
- SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
- makeArrayRef(&CstShiftArray[0], NumElts));
+ SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
SDValue Low, High;
@@ -12997,9 +13665,7 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
} else {
- SDValue CstMask = DAG.getConstant(0xffff, DL, MVT::i32);
- SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
- CstMask, CstMask, CstMask);
+ SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
// uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
@@ -13009,12 +13675,8 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
}
// Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
- SDValue CstFAdd = DAG.getConstantFP(
- APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), DL, MVT::f32);
- SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
- CstFAdd, CstFAdd, CstFAdd, CstFAdd};
- SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
- makeArrayRef(&CstFAddArray[0], NumElts));
+ SDValue VecCstFAdd = DAG.getConstantFP(
+ APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), DL, VecFloatVT);
// float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
@@ -13045,10 +13707,10 @@ SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
}
case MVT::v4i32:
case MVT::v8i32:
- return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
+ return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
case MVT::v16i8:
case MVT::v16i16:
- assert(Subtarget->hasAVX512());
+ assert(Subtarget.hasAVX512());
return DAG.getNode(ISD::UINT_TO_FP, dl, Op.getValueType(),
DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, N0));
}
@@ -13072,8 +13734,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
MVT SrcVT = N0.getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
- if (Subtarget->hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
- (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget->is64Bit()))) {
+ if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
+ (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
// Conversions from unsigned i32 to f32/f64 are legal,
// using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
return Op;
@@ -13083,34 +13745,30 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
return LowerUINT_TO_FP_i64(Op, DAG);
if (SrcVT == MVT::i32 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i32(Op, DAG);
- if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
+ if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
return SDValue();
// Make a 64-bit buffer, and use it to build an FILD.
SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
if (SrcVT == MVT::i32) {
- SDValue WordOff = DAG.getConstant(4, dl, PtrVT);
- SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, WordOff);
+ SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
- StackSlot, MachinePointerInfo(),
- false, false, 0);
+ StackSlot, MachinePointerInfo());
SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
- OffsetSlot, MachinePointerInfo(),
- false, false, 0);
+ OffsetSlot, MachinePointerInfo());
SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
return Fild;
}
assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
SDValue ValueToStore = Op.getOperand(0);
- if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget->is64Bit())
+ if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
// Bitcasting to f64 here allows us to do a single 64-bit store from
// an SSE register, avoiding the store forwarding penalty that would come
// with two 32-bit stores.
ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore,
- StackSlot, MachinePointerInfo(),
- false, false, 0);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
+ MachinePointerInfo());
// For i64 source, we need to add the appropriate power of 2 if the input
// was negative. This is the same as the optimization in
// DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
@@ -13149,7 +13807,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
SDValue Fudge = DAG.getExtLoad(
ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
- false, false, false, 4);
+ /* Alignment = */ 4);
// Extend everything to 80 bits to force it to be done on x87.
// TODO: Are there any fast-math-flags to propagate here?
SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
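A scalar model of this fixup (a sketch; it assumes the selected fudge constant is 2^64, which the visible code only implies via the "appropriate power of 2" comment):

#include <stdint.h>

long double u64_to_fp(uint64_t u) {
  long double d = (long double)(int64_t)u; // signed conversion, as FILD does
  if ((int64_t)u < 0)                      // sign bit set: as unsigned,
    d += 0x1.0p64L;                        // u == (int64_t)u + 2^64
  return d;
}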
@@ -13186,10 +13844,10 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
// used for the 32-bit subtarget, but also for f80 on a 64-bit target.
bool UnsignedFixup = !IsSigned &&
DstTy == MVT::i64 &&
- (!Subtarget->is64Bit() ||
+ (!Subtarget.is64Bit() ||
!isScalarFPTypeInSSEReg(TheVT));
- if (!IsSigned && DstTy != MVT::i64 && !Subtarget->hasAVX512()) {
+ if (!IsSigned && DstTy != MVT::i64 && !Subtarget.hasAVX512()) {
// Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
// The low 32 bits of the fist result will have the correct uint32 result.
assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
@@ -13204,7 +13862,7 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
if (DstTy == MVT::i32 &&
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDValue(), SDValue());
- if (Subtarget->is64Bit() &&
+ if (Subtarget.is64Bit() &&
DstTy == MVT::i64 &&
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDValue(), SDValue());
@@ -13280,8 +13938,7 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
if (isScalarFPTypeInSSEReg(TheVT)) {
assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, DL, Value, StackSlot,
- MachinePointerInfo::getFixedStack(MF, SSFI), false,
- false, 0);
+ MachinePointerInfo::getFixedStack(MF, SSFI));
SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
SDValue Ops[] = {
Chain, StackSlot, DAG.getValueType(TheVT)
@@ -13309,18 +13966,15 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
FistOps, DstTy, MMO);
- SDValue Low32 = DAG.getLoad(MVT::i32, DL, FIST, StackSlot,
- MachinePointerInfo(),
- false, false, false, 0);
- SDValue HighAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackSlot,
- DAG.getConstant(4, DL, PtrVT));
+ SDValue Low32 =
+ DAG.getLoad(MVT::i32, DL, FIST, StackSlot, MachinePointerInfo());
+ SDValue HighAddr = DAG.getMemBasePlusOffset(StackSlot, 4, DL);
- SDValue High32 = DAG.getLoad(MVT::i32, DL, FIST, HighAddr,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue High32 =
+ DAG.getLoad(MVT::i32, DL, FIST, HighAddr, MachinePointerInfo());
High32 = DAG.getNode(ISD::XOR, DL, MVT::i32, High32, Adjust);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
// Join High32 and Low32 into a 64-bit result.
// (High32 << 32) | Low32
Low32 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Low32);
@@ -13347,7 +14001,7 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
@@ -13374,7 +14028,7 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
return SDValue();
- if (Subtarget->hasInt256())
+ if (Subtarget.hasInt256())
return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
@@ -13393,41 +14047,46 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
SDLoc DL(Op);
unsigned int NumElts = VT.getVectorNumElements();
- if (NumElts != 8 && NumElts != 16 && !Subtarget->hasBWI())
+ if (NumElts != 8 && NumElts != 16 && !Subtarget.hasBWI())
return SDValue();
if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
assert(InVT.getVectorElementType() == MVT::i1);
- MVT ExtVT = NumElts == 8 ? MVT::v8i64 : MVT::v16i32;
+
+ // Extend VT if the type is a 128- or 256-bit vector and VLX is not supported.
+ MVT ExtVT = VT;
+ if (!VT.is512BitVector() && !Subtarget.hasVLX())
+ ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
+
SDValue One =
DAG.getConstant(APInt(ExtVT.getScalarSizeInBits(), 1), DL, ExtVT);
SDValue Zero =
DAG.getConstant(APInt::getNullValue(ExtVT.getScalarSizeInBits()), DL, ExtVT);
- SDValue V = DAG.getNode(ISD::VSELECT, DL, ExtVT, In, One, Zero);
- if (VT.is512BitVector())
- return V;
- return DAG.getNode(X86ISD::VTRUNC, DL, VT, V);
+ SDValue SelectedVal = DAG.getNode(ISD::VSELECT, DL, ExtVT, In, One, Zero);
+ if (VT == ExtVT)
+ return SelectedVal;
+ return DAG.getNode(X86ISD::VTRUNC, DL, VT, SelectedVal);
}
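As a concrete instance of the widening above (a sketch, assuming AVX-512F without VLX), zero-extending v8i1 to v8i16 goes through v8i64:

// v8i1 -> v8i16 (illustrative SelectionDAG shapes):
//   ExtVT       = 8 x getIntegerVT(512 / 8) = v8i64
//   SelectedVal = (vselect %in, v8i64 <1,...,1>, v8i64 <0,...,0>)
//   result      = (X86ISD::VTRUNC SelectedVal) : v8i64 -> v8i16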
-static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- if (Subtarget->hasFp256())
+ if (Subtarget.hasFp256())
if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
return Res;
return SDValue();
}
-static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
@@ -13437,7 +14096,7 @@ static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
return LowerZERO_EXTEND_AVX512(Op, Subtarget, DAG);
- if (Subtarget->hasFp256())
+ if (Subtarget.hasFp256())
if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
return Res;
@@ -13447,50 +14106,32 @@ static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
}
static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT InVT = In.getSimpleValueType();
- assert(VT.getVectorElementType() == MVT::i1 && "Unexected vector type.");
+ assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
- // Shift LSB to MSB and use VPMOVB2M - SKX.
+ // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
- if ((InVT.is512BitVector() && InVT.getScalarSizeInBits() <= 16 &&
- Subtarget->hasBWI()) || // legal, will go to VPMOVB2M, VPMOVW2M
- ((InVT.is256BitVector() || InVT.is128BitVector()) &&
- InVT.getScalarSizeInBits() <= 16 && Subtarget->hasBWI() &&
- Subtarget->hasVLX())) { // legal, will go to VPMOVB2M, VPMOVW2M
- // Shift packed bytes not supported natively, bitcast to dword
- MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
- SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, ExtVT,
- DAG.getBitcast(ExtVT, In),
- DAG.getConstant(ShiftInx, DL, ExtVT));
- ShiftNode = DAG.getBitcast(InVT, ShiftNode);
- return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
- }
- if ((InVT.is512BitVector() && InVT.getScalarSizeInBits() >= 32 &&
- Subtarget->hasDQI()) || // legal, will go to VPMOVD2M, VPMOVQ2M
- ((InVT.is256BitVector() || InVT.is128BitVector()) &&
- InVT.getScalarSizeInBits() >= 32 && Subtarget->hasDQI() &&
- Subtarget->hasVLX())) { // legal, will go to VPMOVD2M, VPMOVQ2M
-
- SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, InVT, In,
- DAG.getConstant(ShiftInx, DL, InVT));
- return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
- }
-
- // Shift LSB to MSB, extend if necessary and use TESTM.
- unsigned NumElts = InVT.getVectorNumElements();
- if (InVT.getSizeInBits() < 512 &&
- (InVT.getScalarType() == MVT::i8 || InVT.getScalarType() == MVT::i16 ||
- !Subtarget->hasVLX())) {
- assert((NumElts == 8 || NumElts == 16) && "Unexected vector type.");
-
- // TESTD/Q should be used (if BW supported we use CVT2MASK above),
- // so vector should be extended to packed dword/qword.
+ if (InVT.getScalarSizeInBits() <= 16) {
+ if (Subtarget.hasBWI()) {
+ // Legal; this will lower to VPMOVB2M or VPMOVW2M.
+ // Shifting packed bytes is not supported natively, so bitcast to words.
+ MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
+ SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, ExtVT,
+ DAG.getBitcast(ExtVT, In),
+ DAG.getConstant(ShiftInx, DL, ExtVT));
+ ShiftNode = DAG.getBitcast(InVT, ShiftNode);
+ return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
+ }
+ // Use TESTD/Q, extending the vector to packed dwords/qwords.
+ assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
+ "Unexpected vector type.");
+ unsigned NumElts = InVT.getVectorNumElements();
MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
InVT = ExtVT;
@@ -13523,16 +14164,16 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
return LowerTruncateVecI1(Op, DAG, Subtarget);
// vpmovqb/w/d, vpmovdb/w, vpmovwb
- if (Subtarget->hasAVX512()) {
+ if (Subtarget.hasAVX512()) {
// word to byte only under BWI
- if (InVT == MVT::v16i16 && !Subtarget->hasBWI()) // v16i16 -> v16i8
+ if (InVT == MVT::v16i16 && !Subtarget.hasBWI()) // v16i16 -> v16i8
return DAG.getNode(X86ISD::VTRUNC, DL, VT,
DAG.getNode(X86ISD::VSEXT, DL, MVT::v16i32, In));
return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
}
if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
// On AVX2, v4i64 -> v4i32 becomes VPERMD.
- if (Subtarget->hasInt256()) {
+ if (Subtarget.hasInt256()) {
static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
In = DAG.getBitcast(MVT::v8i32, In);
In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
@@ -13553,7 +14194,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
// On AVX2, v8i32 -> v8i16 becomes PSHUFB.
- if (Subtarget->hasInt256()) {
+ if (Subtarget.hasInt256()) {
In = DAG.getBitcast(MVT::v32i8, In);
SmallVector<SDValue,32> pshufbMask;
@@ -13569,13 +14210,13 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
for (unsigned j = 0; j < 8; ++j)
pshufbMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
}
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
+ SDValue BV = DAG.getBuildVector(MVT::v32i8, DL, pshufbMask);
In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
In = DAG.getBitcast(MVT::v4i64, In);
static const int ShufMask[] = {0, 2, -1, -1};
In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
- &ShufMask[0]);
+ ShufMask);
In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(0, DL));
return DAG.getBitcast(VT, In);
@@ -13611,7 +14252,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
if (!VT.is128BitVector() || !InVT.is256BitVector())
return SDValue();
- assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
+ assert(Subtarget.hasFp256() && "256-bit vector without AVX!");
unsigned NumElems = VT.getVectorNumElements();
MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
@@ -13621,7 +14262,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
for (unsigned i = 0; i != NumElems; ++i)
MaskVec[i] = i * 2;
SDValue V = DAG.getVectorShuffle(NVT, DL, DAG.getBitcast(NVT, In),
- DAG.getUNDEF(NVT), &MaskVec[0]);
+ DAG.getUNDEF(NVT), MaskVec);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
DAG.getIntPtrConstant(0, DL));
}
@@ -13639,9 +14280,8 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
if (StackSlot.getNode())
// Load the result.
- return DAG.getLoad(Op.getValueType(), SDLoc(Op),
- FIST, StackSlot, MachinePointerInfo(),
- false, false, false, 0);
+ return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot,
+ MachinePointerInfo());
// The node is the result.
return FIST;
@@ -13658,9 +14298,8 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
if (StackSlot.getNode())
// Load the result.
- return DAG.getLoad(Op.getValueType(), SDLoc(Op),
- FIST, StackSlot, MachinePointerInfo(),
- false, false, false, 0);
+ return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot,
+ MachinePointerInfo());
// The node is the result.
return FIST;
@@ -13736,10 +14375,9 @@ static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
- SDValue Mask =
- DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, Alignment);
+ SDValue Mask = DAG.getLoad(
+ LogicVT, dl, DAG.getEntryNode(), CPIdx,
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Alignment);
SDValue Op0 = Op.getOperand(0);
bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
@@ -13807,7 +14445,7 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue Mask1 =
DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 16);
+ /* Alignment = */ 16);
if (!IsF128)
Op1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Op1);
SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Op1, Mask1);
@@ -13833,7 +14471,7 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue Val =
DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 16);
+ /* Alignment = */ 16);
// If the magnitude operand wasn't a constant, we need to AND out the sign.
if (!isa<ConstantFPSDNode>(Op0)) {
if (!IsF128)
@@ -13852,18 +14490,25 @@ static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
- // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
- SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
- DAG.getConstant(1, dl, VT));
- return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, dl, VT));
+ MVT OpVT = N0.getSimpleValueType();
+ assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
+ "Unexpected type for FGETSIGN");
+
+ // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
+ MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
+ SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
+ Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
+ Res = DAG.getZExtOrTrunc(Res, dl, VT);
+ Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
+ return Res;
}
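A scalar model of the new MOVMSK-based lowering (a sketch, assuming SSE1; the helper name is illustrative):

#include <xmmintrin.h>

int fgetsign32(float x) {
  __m128 v = _mm_set_ss(x);       // SCALAR_TO_VECTOR, upper lanes zeroed
  return _mm_movemask_ps(v) & 1;  // MOVMSKPS gathers sign bits; bit 0 is x's
}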
// Check whether an OR'd tree is PTEST-able.
-static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
- if (!Subtarget->hasSSE41())
+ if (!Subtarget.hasSSE41())
return SDValue();
if (!Op->hasOneUse())
@@ -13969,9 +14614,27 @@ static bool hasNonFlagsUse(SDValue Op) {
return false;
}
+// Emit KTEST instruction for bit vectors on AVX-512
+static SDValue EmitKTEST(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ if (Op.getOpcode() == ISD::BITCAST) {
+ auto hasKTEST = [&](MVT VT) {
+ unsigned SizeInBits = VT.getSizeInBits();
+ return (Subtarget.hasDQI() && (SizeInBits == 8 || SizeInBits == 16)) ||
+ (Subtarget.hasBWI() && (SizeInBits == 32 || SizeInBits == 64));
+ };
+ SDValue Op0 = Op.getOperand(0);
+ MVT Op0VT = Op0.getValueType().getSimpleVT();
+ if (Op0VT.isVector() && Op0VT.getVectorElementType() == MVT::i1 &&
+ hasKTEST(Op0VT))
+ return DAG.getNode(X86ISD::KTEST, SDLoc(Op), Op0VT, Op0, Op0);
+ }
+ return SDValue();
+}
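For reference, the shape EmitKTEST matches and the payoff (a sketch; the mask register and widths are illustrative):

// With DQI (8/16-bit masks) or BWI (32/64-bit masks):
//   %b = bitcast <16 x i1> %mask to i16
//   test %b, %b                 ; would otherwise need a KMOV to a GPR first
// becomes
//   ktestw %k1, %k1             ; ZF is set iff the mask is all zero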
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
-SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
+SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG) const {
if (Op.getValueType() == MVT::i1) {
SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
@@ -14014,10 +14677,10 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
// doing a separate TEST. TEST always sets OF and CF to 0, so unless
// we prove that the arithmetic won't overflow, we can't use OF or CF.
if (Op.getResNo() != 0 || NeedOF || NeedCF) {
+ // Emit KTEST for bit vectors
+ if (auto Node = EmitKTEST(Op, DAG, Subtarget))
+ return Node;
// Emit a CMP with 0, which is the TEST pattern.
- //if (Op.getValueType() == MVT::i1)
- // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
- // DAG.getConstant(0, MVT::i1));
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
}
@@ -14071,14 +14734,14 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
if (ConstantSDNode *C =
dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
// An add of one will be selected as an INC.
- if (C->isOne() && !Subtarget->slowIncDec()) {
+ if (C->isOne() && !Subtarget.slowIncDec()) {
Opcode = X86ISD::INC;
NumOperands = 1;
break;
}
// An add of negative one (subtract of one) will be selected as a DEC.
- if (C->isAllOnesValue() && !Subtarget->slowIncDec()) {
+ if (C->isAllOnesValue() && !Subtarget.slowIncDec()) {
Opcode = X86ISD::DEC;
NumOperands = 1;
break;
@@ -14106,18 +14769,26 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
: APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
if (!Mask.isSignedIntN(32)) // Avoid large immediates.
break;
- SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
- DAG.getConstant(Mask, dl, VT));
- DAG.ReplaceAllUsesWith(Op, New);
- Op = New;
+ Op = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
+ DAG.getConstant(Mask, dl, VT));
}
break;
case ISD::AND:
- // If the primary and result isn't used, don't bother using X86ISD::AND,
+ // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
// because a TEST instruction will be better.
- if (!hasNonFlagsUse(Op))
- break;
+ if (!hasNonFlagsUse(Op)) {
+ SDValue Op0 = ArithOp->getOperand(0);
+ SDValue Op1 = ArithOp->getOperand(1);
+ EVT VT = ArithOp.getValueType();
+ bool isAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
+ bool isLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
+
+ // But if we can combine this into an ANDN operation, then create an AND
+ // now and allow it to be pattern matched into an ANDN.
+ if (!Subtarget.hasBMI() || !isAndn || !isLegalAndnType)
+ break;
+ }
// FALL THROUGH
case ISD::SUB:
case ISD::OR:
@@ -14137,8 +14808,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
case ISD::AND: Opcode = X86ISD::AND; break;
case ISD::OR: {
if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
- SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
- if (EFLAGS.getNode())
+ if (SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG))
return EFLAGS;
}
Opcode = X86ISD::OR;
@@ -14190,11 +14860,15 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
}
}
- if (Opcode == 0)
+ if (Opcode == 0) {
+ // Emit KTEST for bit vectors
+ if (auto Node = EmitKTEST(Op, DAG, Subtarget))
+ return Node;
+
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
-
+ }
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
@@ -14206,7 +14880,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
if (isNullConstant(Op1))
return EmitTest(Op0, X86CC, dl, DAG);
@@ -14215,13 +14889,12 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
- // Do the comparison at i32 if it's smaller, besides the Atom case.
- // This avoids subregister aliasing issues. Keep the smaller reference
- // if we're optimizing for size, however, as that'll allow better folding
- // of memory operations.
- if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
+ // Only promote the compare up to i32 if it is a 16-bit operation with an
+ // immediate; 16-bit immediates are avoided since they can trigger
+ // length-changing-prefix (LCP) stalls in the decoder.
+ if ((Op0.getValueType() == MVT::i16 &&
+ (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
!DAG.getMachineFunction().getFunction()->optForMinSize() &&
- !Subtarget->isAtom()) {
+ !Subtarget.isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
@@ -14241,7 +14914,7 @@ SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
SelectionDAG &DAG) const {
// If the subtarget does not support the FUCOMI instruction, floating-point
// comparisons have to be converted.
- if (Subtarget->hasCMov() ||
+ if (Subtarget.hasCMov() ||
Cmp.getOpcode() != X86ISD::CMP ||
!Cmp.getOperand(0).getValueType().isFloatingPoint() ||
!Cmp.getOperand(1).getValueType().isFloatingPoint())
@@ -14259,7 +14932,7 @@ SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
// Some 64-bit targets lack SAHF support, but they do support FCOMI.
- assert(Subtarget->hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
+ assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
@@ -14279,10 +14952,10 @@ SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
// instructions: convert to single, rsqrtss, convert back to double, refine
// (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
// along with FMA, this could be a throughput win.
- if (VT == MVT::f32 && Subtarget->hasSSE1())
+ if (VT == MVT::f32 && Subtarget.hasSSE1())
RecipOp = "sqrtf";
- else if ((VT == MVT::v4f32 && Subtarget->hasSSE1()) ||
- (VT == MVT::v8f32 && Subtarget->hasAVX()))
+ else if ((VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
+ (VT == MVT::v8f32 && Subtarget.hasAVX()))
RecipOp = "vec-sqrtf";
else
return SDValue();
@@ -14311,10 +14984,10 @@ SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
// 15 instructions: convert to single, rcpss, convert back to double, refine
// (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
// along with FMA, this could be a throughput win.
- if (VT == MVT::f32 && Subtarget->hasSSE1())
+ if (VT == MVT::f32 && Subtarget.hasSSE1())
RecipOp = "divf";
- else if ((VT == MVT::v4f32 && Subtarget->hasSSE1()) ||
- (VT == MVT::v8f32 && Subtarget->hasAVX()))
+ else if ((VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
+ (VT == MVT::v8f32 && Subtarget.hasAVX()))
RecipOp = "vec-divf";
else
return SDValue();
@@ -14337,10 +15010,9 @@ unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
return 2;
}
-/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
-/// if it's possible.
+/// Result of 'and' is compared against zero. Change to a BT node if possible.
SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
SDValue Op0 = And.getOperand(0);
SDValue Op1 = And.getOperand(1);
if (Op0.getOpcode() == ISD::TRUNCATE)
@@ -14353,19 +15025,19 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
std::swap(Op0, Op1);
if (Op0.getOpcode() == ISD::SHL) {
if (isOneConstant(Op0.getOperand(0))) {
- // If we looked past a truncate, check that it's only truncating away
- // known zeros.
- unsigned BitWidth = Op0.getValueSizeInBits();
- unsigned AndBitWidth = And.getValueSizeInBits();
- if (BitWidth > AndBitWidth) {
- APInt Zeros, Ones;
- DAG.computeKnownBits(Op0, Zeros, Ones);
- if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
- return SDValue();
- }
- LHS = Op1;
- RHS = Op0.getOperand(1);
+ // If we looked past a truncate, check that it's only truncating away
+ // known zeros.
+ unsigned BitWidth = Op0.getValueSizeInBits();
+ unsigned AndBitWidth = And.getValueSizeInBits();
+ if (BitWidth > AndBitWidth) {
+ APInt Zeros, Ones;
+ DAG.computeKnownBits(Op0, Zeros, Ones);
+ if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
+ return SDValue();
}
+ LHS = Op1;
+ RHS = Op0.getOperand(1);
+ }
} else if (Op1.getOpcode() == ISD::Constant) {
ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
uint64_t AndRHSVal = AndRHS->getZExtValue();
@@ -14407,8 +15079,8 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
return SDValue();
}
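For reference, the scalar pattern LowerToBT is matching (a sketch; the helper is illustrative):

#include <stdint.h>

int bit_test(uint32_t x, unsigned n) {
  // An and-with-one-shifted compared against zero is selected as a single
  // BT instruction, which copies bit n of x into CF for SETC/JC.
  return (x & (1u << n)) != 0;
}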
-/// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
-/// mask CMPs.
+/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
+/// CMPs.
static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
SDValue &Op1) {
unsigned SSECC;
@@ -14452,8 +15124,8 @@ static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
return SSECC;
}
-// Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128
-// ones, and then concatenate the result back.
+/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
+/// concatenate the result back.
static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
@@ -14466,13 +15138,13 @@ static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
- SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
- SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
+ SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
- SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
- SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
+ SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
+ SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
// Issue the operation on the smaller types and concatenate the result back
MVT EltVT = VT.getVectorElementType();
@@ -14525,16 +15197,15 @@ static SDValue LowerBoolVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
}
}
-static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
+
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
- assert(Op0.getSimpleValueType().getVectorElementType().getSizeInBits() >= 8 &&
- Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
+ assert(VT.getVectorElementType() == MVT::i1 &&
"Cannot set masked compare for this operation");
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
@@ -14568,8 +15239,8 @@ static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
/// \brief Try to turn a VSETULT into a VSETULE by modifying its second
/// operand \p Op1. If non-trivial (for example because it's not constant)
/// return an empty value.
-static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
-{
+static SDValue ChangeVSETULTtoVSETULE(const SDLoc &dl, SDValue Op1,
+ SelectionDAG &DAG) {
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
if (!BV)
return SDValue();
@@ -14592,10 +15263,10 @@ static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
ULTOp1.push_back(DAG.getConstant(Val - 1, dl, EVT));
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
+ return DAG.getBuildVector(VT, dl, ULTOp1);
}
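The identity behind this rewrite (a note, not from the patch): for an unsigned element x and a nonzero constant C,

//   x <u C   <=>   x <=u (C - 1)

so the constant build-vector is rebuilt with each element decremented. An element equal to zero would wrap, so presumably the loop above rejects that case (the check falls outside this hunk).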
-static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
@@ -14611,32 +15282,59 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif
- unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
- unsigned Opc = X86ISD::CMPP;
- if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
+ unsigned Opc;
+ if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
assert(VT.getVectorNumElements() <= 16);
Opc = X86ISD::CMPM;
- }
- // In the two special cases we can't handle, emit two comparisons.
+ } else {
+ Opc = X86ISD::CMPP;
+ // The SSE/AVX packed FP comparison nodes are defined with a
+ // floating-point vector result that matches the operand type. This allows
+ // them to work with an SSE1 target (integer vector types are not legal).
+ VT = Op0.getSimpleValueType();
+ }
+
+ // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
+ // emit two comparisons and a logic op to tie them together.
+ // TODO: This can be avoided if Intel (and only Intel as of 2016) AVX is
+ // available.
+ SDValue Cmp;
+ unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
if (SSECC == 8) {
+ // LLVM predicate is SETUEQ or SETONE.
unsigned CC0, CC1;
unsigned CombineOpc;
if (SetCCOpcode == ISD::SETUEQ) {
- CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
+ CC0 = 3; // UNORD
+ CC1 = 0; // EQ
+ CombineOpc = Opc == X86ISD::CMPP ? static_cast<unsigned>(X86ISD::FOR) :
+ static_cast<unsigned>(ISD::OR);
} else {
assert(SetCCOpcode == ISD::SETONE);
- CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
+ CC0 = 7; // ORD
+ CC1 = 4; // NEQ
+ CombineOpc = Opc == X86ISD::CMPP ? static_cast<unsigned>(X86ISD::FAND) :
+ static_cast<unsigned>(ISD::AND);
}
SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC0, dl, MVT::i8));
SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC1, dl, MVT::i8));
- return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
+ Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
+ } else {
+ // Handle all other FP comparisons here.
+ Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
+ DAG.getConstant(SSECC, dl, MVT::i8));
}
- // Handle all other FP comparisons here.
- return DAG.getNode(Opc, dl, VT, Op0, Op1,
- DAG.getConstant(SSECC, dl, MVT::i8));
+
+ // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
+ // result type of SETCC. The bitcast is expected to be optimized away
+ // during combining/isel.
+ if (Opc == X86ISD::CMPP)
+ Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
+
+ return Cmp;
}
MVT VTOp0 = Op0.getSimpleValueType();
@@ -14665,38 +15363,38 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// The non-AVX512 code below works under the assumption that source and
// destination types are the same.
- assert((Subtarget->hasAVX512() || (VT == VTOp0)) &&
+ assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
"Value types for source and destination must be the same!");
// Break 256-bit integer vector compare into smaller ones.
- if (VT.is256BitVector() && !Subtarget->hasInt256())
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntVSETCC(Op, DAG);
+ // Operands are boolean (vectors of i1)
MVT OpVT = Op1.getSimpleValueType();
if (OpVT.getVectorElementType() == MVT::i1)
return LowerBoolVSETCC_AVX512(Op, DAG);
- bool MaskResult = (VT.getVectorElementType() == MVT::i1);
- if (Subtarget->hasAVX512()) {
- if (Op1.getSimpleValueType().is512BitVector() ||
- (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
- (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
- return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
-
+ // The result is boolean, but operands are int/float
+ if (VT.getVectorElementType() == MVT::i1) {
// In the AVX-512 architecture, setcc returns a mask with i1 elements,
// but there is no compare instruction for i8 and i16 elements in KNL.
- // We are not talking about 512-bit operands in this case, these
- // types are illegal.
- if (MaskResult &&
- (OpVT.getVectorElementType().getSizeInBits() < 32 &&
- OpVT.getVectorElementType().getSizeInBits() >= 8))
- return DAG.getNode(ISD::TRUNCATE, dl, VT,
- DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
+ // In this case, use an SSE compare and truncate the result.
+ bool UseAVX512Inst =
+ (OpVT.is512BitVector() ||
+ OpVT.getVectorElementType().getSizeInBits() >= 32 ||
+ (Subtarget.hasBWI() && Subtarget.hasVLX()));
+
+ if (UseAVX512Inst)
+ return LowerIntVSETCC_AVX512(Op, DAG);
+
+ return DAG.getNode(ISD::TRUNCATE, dl, VT,
+ DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
}
// Lower using XOP integer comparisons.
if ((VT == MVT::v16i8 || VT == MVT::v8i16 ||
- VT == MVT::v4i32 || VT == MVT::v2i64) && Subtarget->hasXOP()) {
+ VT == MVT::v4i32 || VT == MVT::v2i64) && Subtarget.hasXOP()) {
// Translate compare code to XOP PCOM compare mode.
unsigned CmpMode = 0;
switch (SetCCOpcode) {
@@ -14748,8 +15446,8 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// Special case: Use min/max operations for SETULE/SETUGE
MVT VET = VT.getVectorElementType();
bool hasMinMax =
- (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
- || (Subtarget->hasSSE2() && (VET == MVT::i8));
+ (Subtarget.hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
+ || (Subtarget.hasSSE2() && (VET == MVT::i8));
if (hasMinMax) {
switch (SetCCOpcode) {
@@ -14761,7 +15459,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
}
- bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
+ bool hasSubus = Subtarget.hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
if (!MinMax && hasSubus) {
// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
// Op0 u<= Op1:
@@ -14775,10 +15473,9 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// beneficial because the constant in the register is no longer
// clobbered as the destination, so it can be hoisted out of a loop.
// Only do this pre-AVX since vpcmp* is no longer destructive.
- if (Subtarget->hasAVX())
+ if (Subtarget.hasAVX())
break;
- SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
- if (ULEOp1.getNode()) {
+ if (SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG)) {
Op1 = ULEOp1;
Subus = true; Invert = false; Swap = false;
}
@@ -14801,8 +15498,8 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// Check that the operation in question is available (most are plain SSE2,
// but PCMPGTQ and PCMPEQQ have different requirements).
if (VT == MVT::v2i64) {
- if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
- assert(Subtarget->hasSSE2() && "Don't know how to lower!");
+ if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
+ assert(Subtarget.hasSSE2() && "Don't know how to lower!");
// First cast everything to the right type.
Op0 = DAG.getBitcast(MVT::v4i32, Op0);
@@ -14817,8 +15514,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
} else {
SDValue Sign = DAG.getConstant(0x80000000U, dl, MVT::i32);
SDValue Zero = DAG.getConstant(0x00000000U, dl, MVT::i32);
- SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
- Sign, Zero, Sign, Zero);
+ SB = DAG.getBuildVector(MVT::v4i32, dl, {Sign, Zero, Sign, Zero});
}
Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
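A scalar model of the PCMPGTQ emulation (a sketch; the sign-bit XOR above makes the low-dword compare unsigned, since (a ^ 0x80000000) >s (b ^ 0x80000000) equals a >u b):

#include <stdint.h>

int sgt64(int64_t a, int64_t b) {
  int32_t  aHi = (int32_t)(a >> 32), bHi = (int32_t)(b >> 32);
  uint32_t aLo = (uint32_t)a,        bLo = (uint32_t)b;
  // (hi1 > hi2) | ((hi1 == hi2) & (lo1 >u lo2))
  return aHi > bHi || (aHi == bHi && aLo > bLo);
}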
@@ -14843,10 +15539,10 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getBitcast(VT, Result);
}
- if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
+ if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
// If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
// pcmpeqd + pshufd + pand.
- assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
+ assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
// First cast everything to the right type.
Op0 = DAG.getBitcast(MVT::v4i32, Op0);
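The synthesis named in the comment, modeled with intrinsics (a sketch assuming SSE2 only; the helper name is illustrative):

#include <emmintrin.h>

__m128i pcmpeqq_sse2(__m128i a, __m128i b) {
  __m128i eq = _mm_cmpeq_epi32(a, b);                          // per-dword ==
  __m128i sw = _mm_shuffle_epi32(eq, _MM_SHUFFLE(2, 3, 0, 1)); // swap dword pairs
  return _mm_and_si128(eq, sw); // a qword lane is all-ones iff both dwords matched
}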
@@ -14899,7 +15595,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
- assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
+ assert(((!Subtarget.hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
&& "SetCC type must be 8-bit or 1-bit integer");
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
@@ -14914,8 +15610,11 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
isNullConstant(Op1) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
if (SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG)) {
- if (VT == MVT::i1)
+ if (VT == MVT::i1) {
+ NewSetCC = DAG.getNode(ISD::AssertZext, dl, MVT::i8, NewSetCC,
+ DAG.getValueType(MVT::i1));
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
+ }
return NewSetCC;
}
}
@@ -14937,16 +15636,23 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(CCode, dl, MVT::i8),
Op0.getOperand(1));
- if (VT == MVT::i1)
+ if (VT == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, dl, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
+ }
return SetCC;
}
}
- if ((Op0.getValueType() == MVT::i1) && isOneConstant(Op1) &&
- (CC == ISD::SETEQ || CC == ISD::SETNE)) {
-
- ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
- return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC);
+ if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+ if (isOneConstant(Op1)) {
+ ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
+ return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC);
+ }
+ if (!isNullConstant(Op1)) {
+ SDValue Xor = DAG.getNode(ISD::XOR, dl, MVT::i1, Op0, Op1);
+ return DAG.getSetCC(dl, VT, Xor, DAG.getConstant(0, dl, MVT::i1), CC);
+ }
}
bool isFP = Op1.getSimpleValueType().isFloatingPoint();
@@ -14958,8 +15664,11 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(X86CC, dl, MVT::i8), EFLAGS);
- if (VT == MVT::i1)
+ if (VT == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, dl, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
+ }
return SetCC;
}
@@ -14978,12 +15687,15 @@ SDValue X86TargetLowering::LowerSETCCE(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
- if (Op.getSimpleValueType() == MVT::i1)
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+ if (Op.getSimpleValueType() == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, DL, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
+ return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+ }
return SetCC;
}
-// isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
+/// Return true if the opcode is an X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
unsigned Opc = Op.getNode()->getOpcode();
if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
@@ -15009,14 +15721,23 @@ static bool isX86LogicalCmp(SDValue Op) {
return false;
}
-static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
+/// Returns the "condition" node, which may be wrapped with a "truncate",
+/// like this: (i1 (trunc (i8 X86ISD::SETCC))).
+static SDValue getCondAfterTruncWithZeroHighBitsInput(SDValue V,
+                                                      SelectionDAG &DAG) {
if (V.getOpcode() != ISD::TRUNCATE)
- return false;
+ return V;
SDValue VOp0 = V.getOperand(0);
+ if (VOp0.getOpcode() == ISD::AssertZext &&
+ V.getValueSizeInBits() ==
+ cast<VTSDNode>(VOp0.getOperand(1))->getVT().getSizeInBits())
+ return VOp0.getOperand(0);
+
unsigned InBits = VOp0.getValueSizeInBits();
unsigned Bits = V.getValueSizeInBits();
- return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
+ if (DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits)))
+ return V.getOperand(0);
+ return V;
}
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
@@ -15032,15 +15753,15 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// are available or VBLENDV if AVX is available.
// Otherwise FP cmovs get lowered into a less efficient branch sequence later.
if (Cond.getOpcode() == ISD::SETCC &&
- ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
- (Subtarget->hasSSE1() && VT == MVT::f32)) &&
+ ((Subtarget.hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
+ (Subtarget.hasSSE1() && VT == MVT::f32)) &&
VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
int SSECC = translateX86FSETCC(
cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
if (SSECC != 8) {
- if (Subtarget->hasAVX512()) {
+ if (Subtarget.hasAVX512()) {
SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
DAG.getConstant(SSECC, DL, MVT::i8));
return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
@@ -15062,7 +15783,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// instructions as the AND/ANDN/OR sequence due to register moves, so
// don't bother.
- if (Subtarget->hasAVX() &&
+ if (Subtarget.hasAVX() &&
!isa<ConstantFPSDNode>(Op1) && !isa<ConstantFPSDNode>(Op2)) {
// Convert to vectors, do a VSELECT, and convert back to scalar.
@@ -15122,8 +15843,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
if (Cond.getOpcode() == ISD::SETCC) {
- SDValue NewCond = LowerSETCC(Cond, DAG);
- if (NewCond.getNode())
+ if (SDValue NewCond = LowerSETCC(Cond, DAG))
Cond = NewCond;
}
@@ -15240,8 +15960,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
if (addTest) {
// Look past the truncate if the high bits are known zero.
- if (isTruncWithZeroHighBitsInput(Cond, DAG))
- Cond = Cond.getOperand(0);
+ Cond = getCondAfterTruncWithZeroHighBitsInput(Cond, DAG);
// We know the result of AND is compared against zero. Try to match
// it to BT.
@@ -15302,7 +16021,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
@@ -15313,22 +16032,22 @@ static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
// SKX processor
if ((InVTElt == MVT::i1) &&
- (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
+ (((Subtarget.hasBWI() && Subtarget.hasVLX() &&
VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
- ((Subtarget->hasBWI() && VT.is512BitVector() &&
+ ((Subtarget.hasBWI() && VT.is512BitVector() &&
VTElt.getSizeInBits() <= 16)) ||
- ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
+ ((Subtarget.hasDQI() && Subtarget.hasVLX() &&
VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
- ((Subtarget->hasDQI() && VT.is512BitVector() &&
+ ((Subtarget.hasDQI() && VT.is512BitVector() &&
VTElt.getSizeInBits() >= 32))))
return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
unsigned int NumElts = VT.getVectorNumElements();
- if (NumElts != 8 && NumElts != 16 && !Subtarget->hasBWI())
+ if (NumElts != 8 && NumElts != 16 && !Subtarget.hasBWI())
return SDValue();
if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
@@ -15352,25 +16071,35 @@ static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
}
static SDValue LowerSIGN_EXTEND_VECTOR_INREG(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op->getOperand(0);
MVT VT = Op->getSimpleValueType(0);
MVT InVT = In.getSimpleValueType();
assert(VT.getSizeInBits() == InVT.getSizeInBits());
+ MVT SVT = VT.getVectorElementType();
MVT InSVT = InVT.getVectorElementType();
- assert(VT.getVectorElementType().getSizeInBits() > InSVT.getSizeInBits());
+ assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
- if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
+ if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
return SDValue();
if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
return SDValue();
+ if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
+ !(VT.is256BitVector() && Subtarget.hasInt256()))
+ return SDValue();
SDLoc dl(Op);
+ // For 256-bit vectors, we only need the lower (128-bit) half of the input.
+ if (VT.is256BitVector())
+ In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
+ MVT::getVectorVT(InSVT, InVT.getVectorNumElements() / 2),
+ In, DAG.getIntPtrConstant(0, dl));
+
// SSE41 targets can use the pmovsx* instructions directly.
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
// pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
@@ -15407,7 +16136,7 @@ static SDValue LowerSIGN_EXTEND_VECTOR_INREG(SDValue Op,
return SDValue();
}
-static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
@@ -15422,7 +16151,7 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
(VT != MVT::v16i16 || InVT != MVT::v16i8))
return SDValue();
- if (Subtarget->hasInt256())
+ if (Subtarget.hasInt256())
return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
// Optimize vectors in AVX mode
@@ -15441,13 +16170,13 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask1[i] = i;
- SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
+ SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask1);
SmallVector<int,8> ShufMask2(NumElems, -1);
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask2[i] = i + NumElems/2;
- SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
+ SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask2);
MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements()/2);
@@ -15458,6 +16187,157 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
+// Lower a truncating store. We need special lowering for vXi1 vectors.
+static SDValue LowerTruncatingStore(SDValue StOp, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ StoreSDNode *St = cast<StoreSDNode>(StOp.getNode());
+ SDLoc dl(St);
+ EVT MemVT = St->getMemoryVT();
+ assert(St->isTruncatingStore() && "We only custom lower truncating stores.");
+ assert(MemVT.isVector() && MemVT.getVectorElementType() == MVT::i1 &&
+ "Expected truncstore of i1 vector");
+
+ SDValue Op = St->getValue();
+ MVT OpVT = Op.getValueType().getSimpleVT();
+ unsigned NumElts = OpVT.getVectorNumElements();
+ if ((Subtarget.hasVLX() && Subtarget.hasBWI() && Subtarget.hasDQI()) ||
+ NumElts == 16) {
+ // Truncate and store - everything is legal
+ Op = DAG.getNode(ISD::TRUNCATE, dl, MemVT, Op);
+ if (MemVT.getSizeInBits() < 8)
+ Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
+ DAG.getUNDEF(MVT::v8i1), Op,
+ DAG.getIntPtrConstant(0, dl));
+ return DAG.getStore(St->getChain(), dl, Op, St->getBasePtr(),
+ St->getMemOperand());
+ }
+
+ // Otherwise, assume that we have only AVX-512F.
+ if (NumElts <= 8) {
+ if (NumElts < 8) {
+ // Extend to an 8-element vector.
+ MVT ExtVT = MVT::getVectorVT(OpVT.getScalarType(), 8);
+ Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ExtVT,
+ DAG.getUNDEF(ExtVT), Op, DAG.getIntPtrConstant(0, dl));
+ }
+ Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i1, Op);
+ return DAG.getStore(St->getChain(), dl, Op, St->getBasePtr(),
+ St->getMemOperand());
+ }
+ // v32i8
+ assert(OpVT == MVT::v32i8 && "Unexpected operand type");
+ // Divide the vector into 2 parts and store each part separately
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, Op,
+ DAG.getIntPtrConstant(0, dl));
+ Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::v16i1, Lo);
+ SDValue BasePtr = St->getBasePtr();
+ SDValue StLo = DAG.getStore(St->getChain(), dl, Lo, BasePtr,
+ St->getMemOperand());
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, Op,
+ DAG.getIntPtrConstant(16, dl));
+ Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::v16i1, Hi);
+
+ SDValue BasePtrHi =
+ DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
+ DAG.getConstant(2, dl, BasePtr.getValueType()));
+
+ SDValue StHi = DAG.getStore(St->getChain(), dl, Hi,
+ BasePtrHi, St->getMemOperand());
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StLo, StHi);
+}
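A note on the layout (an observation, not from the patch): the v32i8 source truncates to 32 mask bits in memory, and the low v16i1 half occupies bytes 0-1, which is why the high half's store address above is BasePtr + 2.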
+
+static SDValue LowerExtended1BitVectorLoad(SDValue Op,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+
+ LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
+ SDLoc dl(Ld);
+ EVT MemVT = Ld->getMemoryVT();
+ assert(MemVT.isVector() && MemVT.getScalarType() == MVT::i1 &&
+ "Expected i1 vector load");
+ unsigned ExtOpcode = Ld->getExtensionType() == ISD::ZEXTLOAD ?
+ ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
+ MVT VT = Op.getValueType().getSimpleVT();
+ unsigned NumElts = VT.getVectorNumElements();
+
+ if ((Subtarget.hasVLX() && Subtarget.hasBWI() && Subtarget.hasDQI()) ||
+ NumElts == 16) {
+ // Load and extend - everything is legal
+ if (NumElts < 8) {
+ SDValue Load = DAG.getLoad(MVT::v8i1, dl, Ld->getChain(),
+ Ld->getBasePtr(),
+ Ld->getMemOperand());
+ // Replace chain users with the new chain.
+ assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
+ MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8);
+ SDValue ExtVec = DAG.getNode(ExtOpcode, dl, ExtVT, Load);
+
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
+ DAG.getIntPtrConstant(0, dl));
+ }
+ SDValue Load = DAG.getLoad(MemVT, dl, Ld->getChain(),
+ Ld->getBasePtr(),
+ Ld->getMemOperand());
+ // Replace chain users with the new chain.
+ assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
+
+ // Finally, do a normal sign-extend to the desired register.
+ return DAG.getNode(ExtOpcode, dl, Op.getValueType(), Load);
+ }
+
+ if (NumElts <= 8) {
+ // Assume that we have only AVX-512F.
+ unsigned NumBitsToLoad = NumElts < 8 ? 8 : NumElts;
+ MVT TypeToLoad = MVT::getIntegerVT(NumBitsToLoad);
+ SDValue Load = DAG.getLoad(TypeToLoad, dl, Ld->getChain(),
+ Ld->getBasePtr(),
+ Ld->getMemOperand());
+ // Replace chain users with the new chain.
+ assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
+
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, NumBitsToLoad);
+ SDValue BitVec = DAG.getBitcast(MaskVT, Load);
+
+ if (NumElts == 8)
+ return DAG.getNode(ExtOpcode, dl, VT, BitVec);
+
+ // Take care of v4i1 and v2i1: extend to an 8-element vector and extract.
+
+ MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8);
+ SDValue ExtVec = DAG.getNode(ExtOpcode, dl, ExtVT, BitVec);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
+ DAG.getIntPtrConstant(0, dl));
+ }
+
+ assert(VT == MVT::v32i8 && "Unexpected extload type");
+
+ SmallVector<SDValue, 2> Chains;
+
+ SDValue BasePtr = Ld->getBasePtr();
+ SDValue LoadLo = DAG.getLoad(MVT::v16i1, dl, Ld->getChain(),
+ Ld->getBasePtr(),
+ Ld->getMemOperand());
+ Chains.push_back(LoadLo.getValue(1));
+
+ SDValue BasePtrHi =
+ DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
+ DAG.getConstant(2, dl, BasePtr.getValueType()));
+
+ SDValue LoadHi = DAG.getLoad(MVT::v16i1, dl, Ld->getChain(),
+ BasePtrHi,
+ Ld->getMemOperand());
+ Chains.push_back(LoadHi.getValue(1));
+ SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
+
+ SDValue Lo = DAG.getNode(ExtOpcode, dl, MVT::v16i8, LoadLo);
+ SDValue Hi = DAG.getNode(ExtOpcode, dl, MVT::v16i8, LoadHi);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v32i8, Lo, Hi);
+}
+
// Lower vector extended loads using a shuffle. If SSSE3 is not available we
// may emit an illegal shuffle but the expansion is still better than scalar
// code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
@@ -15465,7 +16345,7 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
// TODO: It is possible to support ZExt by zeroing the undef values during
// the shuffle phase or after the shuffle.
-static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT RegVT = Op.getSimpleValueType();
assert(RegVT.isVector() && "We only custom lower vector sext loads.");
@@ -15473,11 +16353,14 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
"We only custom lower integer vector sext loads.");
// Nothing useful we can do without SSE2 shuffles.
- assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
+ assert(Subtarget.hasSSE2() && "We only custom lower sext loads with SSE2.");
LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
SDLoc dl(Ld);
EVT MemVT = Ld->getMemoryVT();
+ if (MemVT.getScalarType() == MVT::i1)
+ return LowerExtended1BitVectorLoad(Op, Subtarget, DAG);
+
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned RegSz = RegVT.getSizeInBits();
@@ -15492,7 +16375,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
unsigned MemSz = MemVT.getSizeInBits();
assert(RegSz > MemSz && "Register size must be greater than the mem size");
- if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
+ if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget.hasInt256()) {
// The only way in which we have a legal 256-bit vector result but not the
// integer 256-bit operations needed to directly lower a sextload is if we
// have AVX1 but not AVX2. In that case, we can always emit a sextload to
@@ -15508,8 +16391,8 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
"it must be a legal 128-bit vector "
"type!");
Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
- Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(), Ld->getAlignment());
+ Ld->getPointerInfo(), Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags());
} else {
assert(MemSz < 128 &&
"Can't extend a type wider than 128 bits to a 256 bit vector!");
@@ -15522,9 +16405,8 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
Load =
DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
- Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->isInvariant(),
- Ld->getAlignment());
+ Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags());
}
// Replace chain users with the new chain.
@@ -15592,8 +16474,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
// Perform a single load.
SDValue ScalarLoad =
DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
- Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
- Ld->getAlignment());
+ Ld->getAlignment(), Ld->getMemOperand()->getFlags());
Chains.push_back(ScalarLoad.getValue(1));
// Create the first element type using SCALAR_TO_VECTOR in order to avoid
// another round of DAGCombining.
@@ -15615,7 +16496,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
if (Ext == ISD::SEXTLOAD) {
// If we have SSE4.1, we can directly emit a VSEXT node.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
return Sext;
@@ -15637,7 +16518,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
ShuffleVec[i * SizeRatio] = i;
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
- DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
+ DAG.getUNDEF(WideVecVT), ShuffleVec);
// Bitcast to the requested type.
Shuff = DAG.getBitcast(RegVT, Shuff);
@@ -15645,9 +16526,8 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
return Shuff;
}
-// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or
-// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
-// from the AND / OR.
+/// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
+/// each of which has no other use apart from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
Opc = Op.getOpcode();
if (Opc != ISD::OR && Opc != ISD::AND)
@@ -15658,8 +16538,8 @@ static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
Op.getOperand(1).hasOneUse());
}
-// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
-// 1 and that the SETCC node has a single use.
+/// Return true if node is an ISD::XOR of a X86ISD::SETCC and 1 and that the
+/// SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
if (Op.getOpcode() != ISD::XOR)
return false;
@@ -15692,8 +16572,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
Inverted = true;
Cond = Cond.getOperand(0);
} else {
- SDValue NewCond = LowerSETCC(Cond, DAG);
- if (NewCond.getNode())
+ if (SDValue NewCond = LowerSETCC(Cond, DAG))
Cond = NewCond;
}
}
@@ -15917,8 +16796,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
if (addTest) {
    // Look past the truncate if the high bits are known zero.
- if (isTruncWithZeroHighBitsInput(Cond, DAG))
- Cond = Cond.getOperand(0);
+ Cond = getCondAfterTruncWithZeroHighBitsInput(Cond, DAG);
// We know the result of AND is compared against zero. Try to match
// it to BT.
@@ -15951,7 +16829,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool SplitStack = MF.shouldSplitStack();
- bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
+ bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
SplitStack;
SDLoc dl(Op);
@@ -15966,7 +16844,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
// pointer when other instructions are using the stack.
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, dl, true), dl);
- bool Is64Bit = Subtarget->is64Bit();
+ bool Is64Bit = Subtarget.is64Bit();
MVT SPTy = getPointerTy(DAG.getDataLayout());
SDValue Result;
@@ -15975,13 +16853,10 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!");
- EVT VT = Node->getValueType(0);
- SDValue Tmp3 = Node->getOperand(2);
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
- unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlign = TFI.getStackAlignment();
Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
if (Align > StackAlign)
@@ -15995,12 +16870,11 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
    // The 64 bit implementation of segmented stacks needs to clobber both r10
    // and r11. This makes it impossible to use it along with nested parameters.
const Function *F = MF.getFunction();
-
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
- I != E; ++I)
- if (I->hasNestAttr())
+ for (const auto &A : F->args()) {
+ if (A.hasNestAttr())
report_fatal_error("Cannot use segmented stacks with functions that "
"have nested arguments.");
+ }
}
const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
@@ -16009,16 +16883,11 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
DAG.getRegister(Vreg, SPTy));
} else {
- SDValue Flag;
- const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
-
- Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
- Flag = Chain.getValue(1);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
+ MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
- Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
-
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned SPReg = RegInfo->getStackRegister();
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
Chain = SP.getValue(1);
@@ -16047,13 +16916,13 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
SDLoc DL(Op);
- if (!Subtarget->is64Bit() ||
- Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv())) {
+ if (!Subtarget.is64Bit() ||
+ Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv())) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
- MachinePointerInfo(SV), false, false, 0);
+ MachinePointerInfo(SV));
}
// __va_list_tag:
@@ -16064,45 +16933,45 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
SmallVector<SDValue, 8> MemOps;
SDValue FIN = Op.getOperand(1);
// Store gp_offset
- SDValue Store = DAG.getStore(Op.getOperand(0), DL,
- DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
- DL, MVT::i32),
- FIN, MachinePointerInfo(SV), false, false, 0);
+ SDValue Store = DAG.getStore(
+ Op.getOperand(0), DL,
+ DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
+ MachinePointerInfo(SV));
MemOps.push_back(Store);
// Store fp_offset
- FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
- Store = DAG.getStore(Op.getOperand(0), DL,
- DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL,
- MVT::i32),
- FIN, MachinePointerInfo(SV, 4), false, false, 0);
+ FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
+ Store = DAG.getStore(
+ Op.getOperand(0), DL,
+ DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
+ MachinePointerInfo(SV, 4));
MemOps.push_back(Store);
// Store ptr to overflow_arg_area
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
- Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
- MachinePointerInfo(SV, 8),
- false, false, 0);
+ Store =
+ DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
MemOps.push_back(Store);
// Store ptr to reg_save_area.
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
- Subtarget->isTarget64BitLP64() ? 8 : 4, DL));
+ Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
- Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, MachinePointerInfo(
- SV, Subtarget->isTarget64BitLP64() ? 16 : 12), false, false, 0);
+ Store = DAG.getStore(
+ Op.getOperand(0), DL, RSFIN, FIN,
+ MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
MemOps.push_back(Store);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
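
For reference, a sketch of the struct those four stores populate, with field offsets matching the pointer increments above (this mirrors the System V x86-64 __va_list_tag; the layout here is a reading aid, not part of the patch):

    struct va_list_tag {
      unsigned gp_offset;      // byte offset 0
      unsigned fp_offset;      // byte offset 4
      void *overflow_arg_area; // byte offset 8
      void *reg_save_area;     // byte offset 16 on LP64, 12 otherwise
    };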
SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
- assert(Subtarget->is64Bit() &&
+ assert(Subtarget.is64Bit() &&
"LowerVAARG only handles 64-bit va_arg!");
assert(Op.getNode()->getNumOperands() == 4);
MachineFunction &MF = DAG.getMachineFunction();
- if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()))
+ if (Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()))
// The Win64 ABI uses char* instead of a structure.
return DAG.expandVAArg(Op.getNode());
@@ -16132,9 +17001,9 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
- assert(!Subtarget->useSoftFloat() &&
+ assert(!Subtarget.useSoftFloat() &&
!(MF.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat)) &&
- Subtarget->hasSSE1());
+ Subtarget.hasSSE1());
}
// Insert VAARG_64 node into the DAG
@@ -16153,19 +17022,15 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
Chain = VAARG.getValue(1);
// Load the next argument and return it
- return DAG.getLoad(ArgVT, dl,
- Chain,
- VAARG,
- MachinePointerInfo(),
- false, false, false, 0);
+ return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
}
-static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
// where a va_list is still an i8*.
- assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
- if (Subtarget->isCallingConvWin64(
+ assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
+ if (Subtarget.isCallingConvWin64(
DAG.getMachineFunction().getFunction()->getCallingConv()))
// Probably a Win64 va_copy.
return DAG.expandVACopy(Op.getNode());
@@ -16183,9 +17048,9 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
-// getTargetVShiftByConstNode - Handle vector element shifts where the shift
-// amount is a constant. Takes immediate version of shift as input.
-static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
+/// Handle vector element shifts where the shift amount is a constant.
+/// Takes immediate version of shift as input.
+static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue SrcOp, uint64_t ShiftAmt,
SelectionDAG &DAG) {
MVT ElementType = VT.getVectorElementType();
@@ -16214,11 +17079,11 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
ConstantSDNode *ND;
switch(Opc) {
- default: llvm_unreachable(nullptr);
+ default: llvm_unreachable("Unknown opcode!");
case X86ISD::VSHLI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
- if (CurrentOp->getOpcode() == ISD::UNDEF) {
+ if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
@@ -16230,7 +17095,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
case X86ISD::VSRLI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
- if (CurrentOp->getOpcode() == ISD::UNDEF) {
+ if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
@@ -16242,7 +17107,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
case X86ISD::VSRAI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
- if (CurrentOp->getOpcode() == ISD::UNDEF) {
+ if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
@@ -16253,16 +17118,16 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
break;
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
+ return DAG.getBuildVector(VT, dl, Elts);
}
return DAG.getNode(Opc, dl, VT, SrcOp,
DAG.getConstant(ShiftAmt, dl, MVT::i8));
}
-// getTargetVShiftNode - Handle vector element shifts where the shift amount
-// may or may not be a constant. Takes immediate version of shift as input.
-static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
+/// Handle vector element shifts where the shift amount may or may not be a
+/// constant. Takes immediate version of shift as input.
+static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue SrcOp, SDValue ShAmt,
SelectionDAG &DAG) {
MVT SVT = ShAmt.getSimpleValueType();
@@ -16288,7 +17153,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
// Let the shuffle legalizer expand this shift amount node.
SDValue Op0 = ShAmt.getOperand(0);
Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
- ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
+ ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, Subtarget, DAG);
} else {
// Need to build a vector containing shift amount.
// SSE/AVX packed shifts only use the lower 64-bit of the shift count.
@@ -16301,7 +17166,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
ShOps.push_back(DAG.getUNDEF(SVT));
MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
- ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
+ ShAmt = DAG.getBuildVector(BVT, dl, ShOps);
}
// The return type has to be a 128-bit type with the same element
@@ -16316,8 +17181,8 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
/// \brief Return Mask with the necessary casting or extending
/// for \p Mask according to \p MaskVT when lowering masking intrinsics
static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
- const X86Subtarget *Subtarget,
- SelectionDAG &DAG, SDLoc dl) {
+ const X86Subtarget &Subtarget, SelectionDAG &DAG,
+ const SDLoc &dl) {
if (isAllOnesConstant(Mask))
return DAG.getTargetConstant(1, dl, MaskVT);
@@ -16330,9 +17195,9 @@ static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
MVT::getIntegerVT(MaskVT.getSizeInBits()), Mask);
}
- if (Mask.getSimpleValueType() == MVT::i64 && Subtarget->is32Bit()) {
+ if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
if (MaskVT == MVT::v64i1) {
- assert(Subtarget->hasBWI() && "Expected AVX512BW target!");
+ assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
// In case 32bit mode, bitcast i64 is illegal, extend/split it.
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
@@ -16368,7 +17233,7 @@ static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
/// necessary casting or extending for \p Mask when lowering masking intrinsics
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
@@ -16393,13 +17258,14 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
case X86ISD::VTRUNC:
case X86ISD::VTRUNCS:
case X86ISD::VTRUNCUS:
+ case ISD::FP_TO_FP16:
      // We can't use ISD::VSELECT here because it is not always "Legal"
      // for the destination type. For example, vpmovqb requires only AVX512,
      // while a vselect that can operate on byte elements requires BWI.
OpcodeSelect = X86ISD::SELECT;
break;
}
- if (PreservedSrc.getOpcode() == ISD::UNDEF)
+ if (PreservedSrc.isUndef())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
}
@@ -16413,7 +17279,7 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
/// for a scalar instruction.
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (isAllOnesConstant(Mask))
return Op;
@@ -16429,7 +17295,7 @@ static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
Op.getOpcode() == X86ISD::VFPCLASSS)
return DAG.getNode(ISD::OR, dl, VT, Op, IMask);
- if (PreservedSrc.getOpcode() == ISD::UNDEF)
+ if (PreservedSrc.isUndef())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
}
@@ -16495,7 +17361,7 @@ static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
}
-static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -16706,6 +17572,16 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
Src1, Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
+ case VPERM_2OP_MASK : {
+ SDValue Src1 = Op.getOperand(1);
+ SDValue Src2 = Op.getOperand(2);
+ SDValue PassThru = Op.getOperand(3);
+ SDValue Mask = Op.getOperand(4);
+
+ // Swap Src1 and Src2 in the node creation
+    return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1),
+ Mask, PassThru, Subtarget, DAG);
+ }
case VPERM_3OP_MASKZ:
case VPERM_3OP_MASK:{
// Src2 is the PassThru
@@ -16764,6 +17640,30 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
Src1, Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
+ case FMA_OP_SCALAR_MASK:
+ case FMA_OP_SCALAR_MASK3:
+ case FMA_OP_SCALAR_MASKZ: {
+ SDValue Src1 = Op.getOperand(1);
+ SDValue Src2 = Op.getOperand(2);
+ SDValue Src3 = Op.getOperand(3);
+ SDValue Mask = Op.getOperand(4);
+ MVT VT = Op.getSimpleValueType();
+ SDValue PassThru = SDValue();
+
+    // Set the PassThru element.
+ if (IntrData->Type == FMA_OP_SCALAR_MASKZ)
+ PassThru = getZeroVector(VT, Subtarget, DAG, dl);
+ else if (IntrData->Type == FMA_OP_SCALAR_MASK3)
+ PassThru = Src3;
+ else
+ PassThru = Src1;
+
+ SDValue Rnd = Op.getOperand(5);
+ return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl,
+ Op.getValueType(), Src1, Src2,
+ Src3, Rnd),
+ Mask, PassThru, Subtarget, DAG);
+ }
case TERLOG_OP_MASK:
case TERLOG_OP_MASKZ: {
SDValue Src1 = Op.getOperand(1);
@@ -16879,49 +17779,76 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
MVT::i1),
Subtarget, DAG);
- return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i8,
- DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, CmpMask),
- DAG.getValueType(MVT::i1));
+ return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, CmpMask);
}
case COMI: { // Comparison intrinsics
ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
- unsigned X86CC = TranslateX86CC(CC, dl, true, LHS, RHS, DAG);
- assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
- SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
- SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86CC, dl, MVT::i8), Cond);
+ SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
+ SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
+ SDValue SetCC;
+ switch (CC) {
+ case ISD::SETEQ: { // (ZF = 0 and PF = 0)
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_E, dl, MVT::i8), Comi);
+ SDValue SetNP = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_NP, dl, MVT::i8),
+ Comi);
+ SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
+ break;
+ }
+ case ISD::SETNE: { // (ZF = 1 or PF = 1)
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_NE, dl, MVT::i8), Comi);
+ SDValue SetP = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_P, dl, MVT::i8),
+ Comi);
+ SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
+ break;
+ }
+ case ISD::SETGT: // (CF = 0 and ZF = 0)
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_A, dl, MVT::i8), Comi);
+ break;
+ case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_A, dl, MVT::i8), InvComi);
+ break;
+ }
+ case ISD::SETGE: // CF = 0
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_AE, dl, MVT::i8), Comi);
+ break;
+ case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_AE, dl, MVT::i8), InvComi);
+ break;
+ default:
+ llvm_unreachable("Unexpected illegal condition!");
+ }
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
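
A minimal scalar model of the flag mapping in this case, assuming comiss/ucomiss semantics in which an unordered result (either operand NaN) sets ZF, PF, and CF; it shows why SETEQ needs both COND_E and COND_NP while SETNE accepts COND_NE or COND_P:

    #include <cmath>

    bool comiEq(float a, float b) {
      bool pf = std::isnan(a) || std::isnan(b); // PF: compare was unordered
      bool zf = pf || (a == b);                 // ZF is also set when unordered
      return zf && !pf;                         // COND_E && COND_NP
    }

    bool comiNe(float a, float b) {
      bool pf = std::isnan(a) || std::isnan(b);
      bool zf = pf || (a == b);
      return !zf || pf;                         // COND_NE || COND_P
    }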
case COMI_RM: { // Comparison intrinsics with Sae
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
- SDValue CC = Op.getOperand(3);
+ unsigned CondVal = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
SDValue Sae = Op.getOperand(4);
- auto ComiType = TranslateX86ConstCondToX86CC(CC);
- // choose between ordered and unordered (comi/ucomi)
- unsigned comiOp = std::get<0>(ComiType) ? IntrData->Opc0 : IntrData->Opc1;
- SDValue Cond;
- if (cast<ConstantSDNode>(Sae)->getZExtValue() !=
- X86::STATIC_ROUNDING::CUR_DIRECTION)
- Cond = DAG.getNode(comiOp, dl, MVT::i32, LHS, RHS, Sae);
+
+ SDValue FCmp;
+ if (cast<ConstantSDNode>(Sae)->getZExtValue() ==
+ X86::STATIC_ROUNDING::CUR_DIRECTION)
+ FCmp = DAG.getNode(X86ISD::FSETCC, dl, MVT::i1, LHS, RHS,
+ DAG.getConstant(CondVal, dl, MVT::i8));
else
- Cond = DAG.getNode(comiOp, dl, MVT::i32, LHS, RHS);
- SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(std::get<1>(ComiType), dl, MVT::i8), Cond);
- return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
+ FCmp = DAG.getNode(X86ISD::FSETCC, dl, MVT::i1, LHS, RHS,
+ DAG.getConstant(CondVal, dl, MVT::i8), Sae);
+ // AnyExt just uses KMOVW %kreg, %r32; ZeroExt emits "and $1, %reg"
+ return DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, FCmp);
}
case VSHIFT:
return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
Op.getOperand(1), Op.getOperand(2), DAG);
- case VSHIFT_MASK:
- return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
- Op.getSimpleValueType(),
- Op.getOperand(1),
- Op.getOperand(2), DAG),
- Op.getOperand(4), Op.getOperand(3), Subtarget,
- DAG);
case COMPRESS_EXPAND_IN_REG: {
SDValue Mask = Op.getOperand(3);
SDValue DataToCompress = Op.getOperand(1);
@@ -16940,14 +17867,6 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
Mask = DAG.getBitcast(MaskVT, Mask);
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Mask);
}
- case BLEND: {
- SDValue Mask = Op.getOperand(3);
- MVT VT = Op.getSimpleValueType();
- MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
- SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
- return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
- Op.getOperand(2));
- }
case KUNPCK: {
MVT VT = Op.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits()/2);
@@ -16960,6 +17879,35 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
Src2, Src1);
return DAG.getBitcast(VT, Res);
}
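
A sketch of the KUNPCK semantics relied on here, assuming the byte-width variant (kunpckbw): the result's low half comes from the second source and its high half from the first, which is why the node above is created with Src2 and Src1 swapped.

    #include <cstdint>

    uint16_t kunpckbw(uint16_t src1, uint16_t src2) {
      return uint16_t(((src1 & 0xFF) << 8) | (src2 & 0xFF));
    }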
+ case FIXUPIMMS:
+ case FIXUPIMMS_MASKZ:
+ case FIXUPIMM:
+  case FIXUPIMM_MASKZ: {
+ SDValue Src1 = Op.getOperand(1);
+ SDValue Src2 = Op.getOperand(2);
+ SDValue Src3 = Op.getOperand(3);
+ SDValue Imm = Op.getOperand(4);
+ SDValue Mask = Op.getOperand(5);
+    SDValue Passthru = (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMMS) ?
+                       Src1 : getZeroVector(VT, Subtarget, DAG, dl);
+    // We support two possible modes for these intrinsics: with and without
+    // a rounding mode. First, check whether the intrinsic has a rounding
+    // mode (7 operands); if not, use the "current direction" rounding mode.
+ SDValue Rnd;
+ if (Op.getNumOperands() == 7)
+ Rnd = Op.getOperand(6);
+ else
+ Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
+ if (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMM_MASKZ)
+ return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
+ Src1, Src2, Src3, Imm, Rnd),
+ Mask, Passthru, Subtarget, DAG);
+ else // Scalar - FIXUPIMMS, FIXUPIMMS_MASKZ
+ return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
+ Src1, Src2, Src3, Imm, Rnd),
+ Mask, Passthru, Subtarget, DAG);
+ }
case CONVERT_TO_MASK: {
MVT SrcVT = Op.getOperand(1).getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
@@ -16995,6 +17943,21 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
subVec, subVec, immVal),
Mask, Passthru, Subtarget, DAG);
}
+ case BRCST32x2_TO_VEC: {
+ SDValue Src = Op.getOperand(1);
+ SDValue PassThru = Op.getOperand(2);
+ SDValue Mask = Op.getOperand(3);
+
+ assert((VT.getScalarType() == MVT::i32 ||
+ VT.getScalarType() == MVT::f32) && "Unexpected type!");
+    // Bitcast Src to a packed 64-bit element type.
+ MVT ScalarVT = VT.getScalarType() == MVT::i32 ? MVT::i64 : MVT::f64;
+ MVT BitcastVT = MVT::getVectorVT(ScalarVT, Src.getValueSizeInBits()/64);
+ Src = DAG.getBitcast(BitcastVT, Src);
+
+ return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
+ Mask, PassThru, Subtarget, DAG);
+ }
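+
A scalar sketch of the broadcast this bitcast enables (names are illustrative): the source's low pair of 32-bit lanes is treated as a single 64-bit element and splat across the destination.

    #include <cstdint>

    void broadcast32x2(const uint32_t src[2], uint32_t *dst, int numLanes) {
      for (int i = 0; i < numLanes; i += 2) {
        dst[i] = src[0];
        dst[i + 1] = src[1];
      }
    }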
default:
break;
}
@@ -17082,7 +18045,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
SDValue RHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(2));
SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8);
SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
- SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
+ SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
@@ -17163,6 +18126,16 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
return DAG.getNode(Opcode, dl, VTs, NewOps);
}
+ case Intrinsic::eh_sjlj_lsda: {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+ auto &Context = MF.getMMI().getContext();
+ MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
+ Twine(MF.getFunctionNumber()));
+ return DAG.getNode(X86ISD::Wrapper, dl, VT, DAG.getMCSymbol(S, PtrVT));
+ }
+
case Intrinsic::x86_seh_lsda: {
// Compute the symbol for the LSDA. We know it'll get emitted later.
MachineFunction &MF = DAG.getMachineFunction();
@@ -17192,7 +18165,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
// Returns one of the stack, base, or frame pointer registers, depending on
// which is used to reference local variables.
MachineFunction &MF = DAG.getMachineFunction();
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned Reg;
if (RegInfo->hasBasePointer(MF))
Reg = RegInfo->getBaseRegister();
@@ -17206,7 +18179,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain,
- const X86Subtarget * Subtarget) {
+ const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = cast<ConstantSDNode>(ScaleOp);
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
@@ -17217,7 +18190,7 @@ static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
- if (Src.getOpcode() == ISD::UNDEF)
+ if (Src.isUndef())
Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
SDValue Ops[] = {Src, VMask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
@@ -17237,7 +18210,7 @@ static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
MVT MaskVT = MVT::getVectorVT(MVT::i1,
Index.getSimpleValueType().getVectorNumElements());
- SDValue VMask = getMaskNode(Mask, MaskVT, &Subtarget, DAG, dl);
+ SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
SDValue Ops[] = {Base, Scale, Index, Disp, Segment, VMask, Src, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
@@ -17255,18 +18228,19 @@ static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Segment = DAG.getRegister(0, MVT::i32);
MVT MaskVT =
MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
- SDValue VMask = getMaskNode(Mask, MaskVT, &Subtarget, DAG, dl);
+ SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
//SDVTList VTs = DAG.getVTList(MVT::Other);
SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
return SDValue(Res, 0);
}
-// getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
-// read performance monitor counters (x86_rdpmc).
-static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
- SelectionDAG &DAG, const X86Subtarget *Subtarget,
- SmallVectorImpl<SDValue> &Results) {
+/// Handles the lowering of builtin intrinsics that read performance monitor
+/// counters (x86_rdpmc).
+static void getReadPerformanceCounter(SDNode *N, const SDLoc &DL,
+ SelectionDAG &DAG,
+ const X86Subtarget &Subtarget,
+ SmallVectorImpl<SDValue> &Results) {
assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue LO, HI;
@@ -17279,7 +18253,7 @@ static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
// Reads the content of a 64-bit performance counter and returns it in the
// registers EDX:EAX.
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
@@ -17290,7 +18264,7 @@ static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
}
Chain = HI.getValue(1);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
// The EAX register is loaded with the low-order 32 bits. The EDX register
// is loaded with the supported high-order bits of the counter.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
@@ -17307,12 +18281,13 @@ static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
Results.push_back(Chain);
}
-// getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
-// read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
-// also used to custom lower READCYCLECOUNTER nodes.
-static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
- SelectionDAG &DAG, const X86Subtarget *Subtarget,
- SmallVectorImpl<SDValue> &Results) {
+/// Handles the lowering of builtin intrinsics that read the time stamp counter
+/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
+/// READCYCLECOUNTER nodes.
+static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
+ SelectionDAG &DAG,
+ const X86Subtarget &Subtarget,
+ SmallVectorImpl<SDValue> &Results) {
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
SDValue LO, HI;
@@ -17320,7 +18295,7 @@ static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
// The processor's time-stamp counter (a 64-bit MSR) is stored into the
// EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
// and the EAX register is loaded with the low-order 32 bits.
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
@@ -17341,10 +18316,10 @@ static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
// Explicitly store the content of ECX at the location passed in input
// to the 'rdtscp' intrinsic.
Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo());
}
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
// The EDX register is loaded with the high-order 32 bits of the MSR, and
// the EAX register is loaded with the low-order 32 bits.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
@@ -17361,7 +18336,7 @@ static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
Results.push_back(Chain);
}
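
A scalar equivalent of the EDX:EAX merge performed on 64-bit targets above, where both halves arrive zero-extended in 64-bit registers (a sketch, not the patch's code):

    #include <cstdint>

    uint64_t mergeCounterHalves(uint64_t lo, uint64_t hi) {
      return (hi << 32) | lo; // SHL by 32, then OR
    }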
-static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SmallVector<SDValue, 2> Results;
SDLoc DL(Op);
@@ -17388,44 +18363,25 @@ static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
return Chain;
}
-/// \brief Lower intrinsics for TRUNCATE_TO_MEM case
-/// return truncate Store/MaskedStore Node
-static SDValue LowerINTRINSIC_TRUNCATE_TO_MEM(const SDValue & Op,
- SelectionDAG &DAG,
- MVT ElementType) {
- SDLoc dl(Op);
- SDValue Mask = Op.getOperand(4);
- SDValue DataToTruncate = Op.getOperand(3);
- SDValue Addr = Op.getOperand(2);
+static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
+ MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
+ SDValue EHGuard = Op.getOperand(2);
+ WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
+ if (!EHInfo)
+ report_fatal_error("EHGuard only live in functions using WinEH");
- MVT VT = DataToTruncate.getSimpleValueType();
- MVT SVT = MVT::getVectorVT(ElementType, VT.getVectorNumElements());
-
- if (isAllOnesConstant(Mask)) // return just a truncate store
- return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr,
- MachinePointerInfo(), SVT, false, false,
- SVT.getScalarSizeInBits()/8);
-
- MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
- MVT BitcastVT = MVT::getVectorVT(MVT::i1,
- Mask.getSimpleValueType().getSizeInBits());
- // In case when MaskVT equals v2i1 or v4i1, low 2 or 4 elements
- // are extracted by EXTRACT_SUBVECTOR.
- SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
- DAG.getBitcast(BitcastVT, Mask),
- DAG.getIntPtrConstant(0, dl));
-
- MachineMemOperand *MMO = DAG.getMachineFunction().
- getMachineMemOperand(MachinePointerInfo(),
- MachineMemOperand::MOStore, SVT.getStoreSize(),
- SVT.getScalarSizeInBits()/8);
+ // Cast the operand to an alloca, and remember the frame index.
+ auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
+ if (!FINode)
+ report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
+ EHInfo->EHGuardFrameIndex = FINode->getIndex();
- return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr,
- VMask, SVT, MMO, true);
+ // Return the chain operand without making any DAG nodes.
+ return Chain;
}
-static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
@@ -17433,6 +18389,8 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
if (!IntrData) {
if (IntNo == llvm::Intrinsic::x86_seh_ehregnode)
return MarkEHRegistrationNode(Op, DAG);
+ if (IntNo == llvm::Intrinsic::x86_seh_ehguard)
+ return MarkEHGuard(Op, DAG);
if (IntNo == llvm::Intrinsic::x86_flags_read_u32 ||
IntNo == llvm::Intrinsic::x86_flags_read_u64 ||
IntNo == llvm::Intrinsic::x86_flags_write_u32 ||
@@ -17491,7 +18449,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue Src = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
- Scale, Chain, *Subtarget);
+ Scale, Chain, Subtarget);
}
case PREFETCH: {
SDValue Hint = Op.getOperand(6);
@@ -17504,7 +18462,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue Base = Op.getOperand(4);
SDValue Scale = Op.getOperand(5);
return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
- *Subtarget);
+ Subtarget);
}
// Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
case RDTSC: {
@@ -17532,7 +18490,6 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
}
// ADC/ADCX/SBB
case ADX: {
- SmallVector<SDValue, 2> Results;
SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
@@ -17540,13 +18497,11 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
Op.getOperand(4), GenCF.getValue(1));
SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
- Op.getOperand(5), MachinePointerInfo(),
- false, false, 0);
+ Op.getOperand(5), MachinePointerInfo());
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(X86::COND_B, dl, MVT::i8),
Res.getValue(1));
- Results.push_back(SetCC);
- Results.push_back(Store);
+ SDValue Results[] = { SetCC, Store };
return DAG.getMergeValues(Results, dl);
}
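
A scalar model of this ADX lowering (the helper name and widths are illustrative): the first ADD rematerializes the incoming carry from the i8 argument, the ADC-style Opc0 node consumes it, and COND_B reads back the carry-out.

    #include <cstdint>

    uint8_t addCarry32(uint8_t cIn, uint32_t a, uint32_t b, uint32_t *out) {
      uint64_t sum = uint64_t(a) + b + (cIn != 0);
      *out = uint32_t(sum);      // stored through operand 5
      return uint8_t(sum >> 32); // SETCC(COND_B)
    }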
case COMPRESS_TO_MEM: {
@@ -17554,48 +18509,45 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue DataToCompress = Op.getOperand(3);
SDValue Addr = Op.getOperand(2);
SDValue Chain = Op.getOperand(0);
-
MVT VT = DataToCompress.getSimpleValueType();
+
+ MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
+ assert(MemIntr && "Expected MemIntrinsicSDNode!");
+
if (isAllOnesConstant(Mask)) // return just a store
return DAG.getStore(Chain, dl, DataToCompress, Addr,
- MachinePointerInfo(), false, false,
- VT.getScalarSizeInBits()/8);
+ MemIntr->getMemOperand());
SDValue Compressed =
getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress),
Mask, DAG.getUNDEF(VT), Subtarget, DAG);
return DAG.getStore(Chain, dl, Compressed, Addr,
- MachinePointerInfo(), false, false,
- VT.getScalarSizeInBits()/8);
+ MemIntr->getMemOperand());
}
case TRUNCATE_TO_MEM_VI8:
- return LowerINTRINSIC_TRUNCATE_TO_MEM(Op, DAG, MVT::i8);
case TRUNCATE_TO_MEM_VI16:
- return LowerINTRINSIC_TRUNCATE_TO_MEM(Op, DAG, MVT::i16);
- case TRUNCATE_TO_MEM_VI32:
- return LowerINTRINSIC_TRUNCATE_TO_MEM(Op, DAG, MVT::i32);
- case EXPAND_FROM_MEM: {
+ case TRUNCATE_TO_MEM_VI32: {
SDValue Mask = Op.getOperand(4);
- SDValue PassThru = Op.getOperand(3);
+ SDValue DataToTruncate = Op.getOperand(3);
SDValue Addr = Op.getOperand(2);
SDValue Chain = Op.getOperand(0);
- MVT VT = Op.getSimpleValueType();
- if (isAllOnesConstant(Mask)) // return just a load
- return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
- false, VT.getScalarSizeInBits()/8);
+ MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
+ assert(MemIntr && "Expected MemIntrinsicSDNode!");
- SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
- false, false, false,
- VT.getScalarSizeInBits()/8);
+ EVT VT = MemIntr->getMemoryVT();
- SDValue Results[] = {
- getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToExpand),
- Mask, PassThru, Subtarget, DAG), Chain};
- return DAG.getMergeValues(Results, dl);
+ if (isAllOnesConstant(Mask)) // return just a truncate store
+ return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, VT,
+ MemIntr->getMemOperand());
+
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
+ SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
+
+ return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, VT,
+ MemIntr->getMemOperand(), true);
}
- case LOADU:
- case LOADA: {
+ case EXPAND_FROM_MEM: {
SDValue Mask = Op.getOperand(4);
SDValue PassThru = Op.getOperand(3);
SDValue Addr = Op.getOperand(2);
@@ -17605,13 +18557,16 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
assert(MemIntr && "Expected MemIntrinsicSDNode!");
+ SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr,
+ MemIntr->getMemOperand());
+
if (isAllOnesConstant(Mask)) // return just a load
- return DAG.getLoad(VT, dl, Chain, Addr, MemIntr->getMemOperand());
+ return DataToExpand;
- MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
- SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
- return DAG.getMaskedLoad(VT, dl, Chain, Addr, VMask, PassThru, VT,
- MemIntr->getMemOperand(), ISD::NON_EXTLOAD);
+ SDValue Results[] = {
+ getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToExpand),
+ Mask, PassThru, Subtarget, DAG), Chain};
+ return DAG.getMergeValues(Results, dl);
}
}
}
@@ -17630,25 +18585,24 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
- DAG.getNode(ISD::ADD, dl, PtrVT,
- FrameAddr, Offset),
- MachinePointerInfo(), false, false, false, 0);
+ DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
+ MachinePointerInfo());
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
- return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
- RetAddrFI, MachinePointerInfo(), false, false, false, 0);
+ return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
+ MachinePointerInfo());
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
EVT VT = Op.getValueType();
MFI->setFrameAddressIsTaken(true);
@@ -17678,8 +18632,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
- MachinePointerInfo(),
- false, false, false, 0);
+ MachinePointerInfo());
return FrameAddr;
}
@@ -17687,7 +18640,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
// this table could be generated automatically from RegInfo.
unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
SelectionDAG &DAG) const {
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
const MachineFunction &MF = DAG.getMachineFunction();
unsigned Reg = StringSwitch<unsigned>(RegName)
@@ -17703,7 +18656,7 @@ unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
" is allocatable: function has no frame pointer");
#ifndef NDEBUG
else {
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FrameReg =
RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
@@ -17720,23 +18673,27 @@ unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}
unsigned X86TargetLowering::getExceptionPointerRegister(
const Constant *PersonalityFn) const {
if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
- return Subtarget->isTarget64BitLP64() ? X86::RDX : X86::EDX;
+ return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
- return Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
+ return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
}
unsigned X86TargetLowering::getExceptionSelectorRegister(
const Constant *PersonalityFn) const {
// Funclet personalities don't use selectors (the runtime does the selection).
assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
- return Subtarget->isTarget64BitLP64() ? X86::RDX : X86::EDX;
+ return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
+}
+
+bool X86TargetLowering::needsFixedCatchObjects() const {
+ return Subtarget.isTargetWin64();
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
@@ -17746,7 +18703,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl (Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
(FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
@@ -17758,8 +18715,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(RegInfo->getSlotSize(),
dl));
StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
- Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
- false, false, 0);
+ Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
@@ -17769,6 +18725,16 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
+  // If the subtarget is not 64-bit, we may need the global base reg
+  // after the pseudo is expanded post-isel, i.e., after the CGBR pass
+  // has run. Therefore, ask for the GlobalBaseReg now so that the pass
+  // inserts the code for us in case we need it. Otherwise, we would end
+  // up referencing a virtual register that is never defined!
+ if (!Subtarget.is64Bit()) {
+ const X86InstrInfo *TII = Subtarget.getInstrInfo();
+ (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
+ }
return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
DAG.getVTList(MVT::i32, MVT::Other),
Op.getOperand(0), Op.getOperand(1));
@@ -17781,6 +18747,13 @@ SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
Op.getOperand(0), Op.getOperand(1));
}
+SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
+ Op.getOperand(0));
+}
+
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
return Op.getOperand(0);
}
@@ -17794,9 +18767,9 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SDLoc dl (Op);
const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
SDValue OutChains[6];
// Large code-model.
@@ -17812,14 +18785,13 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
SDValue Addr = Trmp;
OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
- Addr, MachinePointerInfo(TrmpAddr),
- false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(2, dl, MVT::i64));
- OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
- MachinePointerInfo(TrmpAddr, 2),
- false, false, 2);
+ OutChains[1] =
+ DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
+ /* Alignment = */ 2);
// Load the 'nest' parameter value into R10.
// R10 is specified in X86CallingConv.td
@@ -17827,29 +18799,26 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(10, dl, MVT::i64));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
- Addr, MachinePointerInfo(TrmpAddr, 10),
- false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr, 10));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(12, dl, MVT::i64));
- OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
- MachinePointerInfo(TrmpAddr, 12),
- false, false, 2);
+ OutChains[3] =
+ DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
+ /* Alignment = */ 2);
// Jump to the nested function.
OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(20, dl, MVT::i64));
OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
- Addr, MachinePointerInfo(TrmpAddr, 20),
- false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr, 20));
unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(22, dl, MVT::i64));
OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
- Addr, MachinePointerInfo(TrmpAddr, 22),
- false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr, 22));
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
} else {
@@ -17909,29 +18878,28 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
// This is storing the opcode for MOV32ri.
const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
- OutChains[0] = DAG.getStore(Root, dl,
- DAG.getConstant(MOV32ri|N86Reg, dl, MVT::i8),
- Trmp, MachinePointerInfo(TrmpAddr),
- false, false, 0);
+ OutChains[0] =
+ DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
+ Trmp, MachinePointerInfo(TrmpAddr));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(1, dl, MVT::i32));
- OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
- MachinePointerInfo(TrmpAddr, 1),
- false, false, 1);
+ OutChains[1] =
+ DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
+ /* Alignment = */ 1);
const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(5, dl, MVT::i32));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
Addr, MachinePointerInfo(TrmpAddr, 5),
- false, false, 1);
+ /* Alignment = */ 1);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(6, dl, MVT::i32));
- OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
- MachinePointerInfo(TrmpAddr, 6),
- false, false, 1);
+ OutChains[3] =
+ DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
+ /* Alignment = */ 1);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
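
As a reading aid for the 64-bit branch above, a sketch of the byte image its six stores assemble, assuming REX_WB is 0x49 and MOV64ri/JMP64r are the usual 0xB8-family and 0xFF opcodes (the i16 opcode stores are little-endian; the two 8-byte immediates are filled from FPtr and Nest):

    // Hypothetical trampoline image; offsets match the getConstant values.
    unsigned char Trampoline64[23] = {
        0x49, 0xBB, 0, 0, 0, 0, 0, 0, 0, 0, // movabsq $fptr, %r11 (fptr at 2)
        0x49, 0xBA, 0, 0, 0, 0, 0, 0, 0, 0, // movabsq $nest, %r10 (nest at 12)
        0x49, 0xFF, 0xE3                    // jmpq *%r11 (opcode at 20, ModRM at 22)
    };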
@@ -17959,7 +18927,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
*/
MachineFunction &MF = DAG.getMachineFunction();
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
@@ -17979,8 +18947,8 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
Ops, MVT::i16, MMO);
// Load FP Control Word from stack slot
- SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
- MachinePointerInfo(), false, false, false, 0);
+ SDValue CWD =
+ DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
// Transform as necessary
SDValue CWD1 =
@@ -18014,6 +18982,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
// split the vector, perform the operation on its Lo and Hi parts and
// concatenate the results.
static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
+ assert(Op.getOpcode() == ISD::CTLZ);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
@@ -18044,8 +19013,8 @@ static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
MVT OutVT = MVT::getVectorVT(EltVT, NumElems/2);
- Lo = DAG.getNode(Op.getOpcode(), dl, OutVT, Lo);
- Hi = DAG.getNode(Op.getOpcode(), dl, OutVT, Hi);
+ Lo = DAG.getNode(ISD::CTLZ, dl, OutVT, Lo);
+ Hi = DAG.getNode(ISD::CTLZ, dl, OutVT, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
}
@@ -18064,51 +19033,112 @@ static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
}
-static SDValue LowerCTLZ(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
+// Lower CTLZ using a PSHUFB lookup table implementation.
+static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
- MVT OpVT = VT;
- unsigned NumBits = VT.getSizeInBits();
- SDLoc dl(Op);
+ int NumElts = VT.getVectorNumElements();
+ int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
+ MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
- if (VT.isVector() && Subtarget->hasAVX512())
- return LowerVectorCTLZ_AVX512(Op, DAG);
+ // Per-nibble leading zero PSHUFB lookup table.
+ const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
+ /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
+ /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
+ /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
- Op = Op.getOperand(0);
- if (VT == MVT::i8) {
- // Zero extend to i32 since there is not an i8 bsr.
- OpVT = MVT::i32;
- Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
+ SmallVector<SDValue, 64> LUTVec;
+ for (int i = 0; i < NumBytes; ++i)
+ LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
+ SDValue InRegLUT = DAG.getNode(ISD::BUILD_VECTOR, DL, CurrVT, LUTVec);
+
+  // Begin by bitcasting the input to a byte vector, then split those bytes
+  // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
+ // If the hi input nibble is zero then we add both results together, otherwise
+ // we just take the hi result (by masking the lo result to zero before the
+ // add).
+ SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
+ SDValue Zero = getZeroVector(CurrVT, Subtarget, DAG, DL);
+
+ SDValue NibbleMask = DAG.getConstant(0xF, DL, CurrVT);
+ SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
+ SDValue Lo = DAG.getNode(ISD::AND, DL, CurrVT, Op0, NibbleMask);
+ SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
+ SDValue HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
+
+ Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
+ Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
+ Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
+ SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
+
+  // Merge the result from vXi8 back to VT, working on the lo/hi halves
+ // of the current vector width in the same way we did for the nibbles.
+ // If the upper half of the input element is zero then add the halves'
+ // leading zero counts together, otherwise just use the upper half's.
+ // Double the width of the result until we are at target width.
+ while (CurrVT != VT) {
+ int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
+ int CurrNumElts = CurrVT.getVectorNumElements();
+ MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
+ MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
+ SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
+
+ // Check if the upper half of the input element is zero.
+ SDValue HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
+ DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
+ HiZ = DAG.getBitcast(NextVT, HiZ);
+
+ // Move the upper/lower halves to the lower bits as we'll be extending to
+ // NextVT. Mask the lower result to zero unless HiZ is set (i.e. unless the
+ // upper half of the input element is zero) and add the results together.
+ SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
+ SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
+ SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
+ R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
+ Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
+ CurrVT = NextVT;
}
- // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
- SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
- Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
+ return Res;
+}
- // If src is zero (i.e. bsr sets ZF), returns NumBits.
- SDValue Ops[] = {
- Op,
- DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
- DAG.getConstant(X86::COND_E, dl, MVT::i8),
- Op.getValue(1)
- };
- Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
+static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+ SDValue Op0 = Op.getOperand(0);
- // Finally xor with NumBits-1.
- Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
- DAG.getConstant(NumBits - 1, dl, OpVT));
+ if (Subtarget.hasAVX512())
+ return LowerVectorCTLZ_AVX512(Op, DAG);
- if (VT == MVT::i8)
- Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
- return Op;
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector() && !Subtarget.hasInt256()) {
+ unsigned NumElems = VT.getVectorNumElements();
+
+ // Extract each 128-bit vector, perform ctlz and concat the result.
+ SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
+ SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
+ DAG.getNode(ISD::CTLZ, DL, LHS.getValueType(), LHS),
+ DAG.getNode(ISD::CTLZ, DL, RHS.getValueType(), RHS));
+ }
+
+ assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
+ return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
}
-static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
+static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
- EVT OpVT = VT;
+ MVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
+ unsigned Opc = Op.getOpcode();
+
+ if (VT.isVector())
+ return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
Op = Op.getOperand(0);
if (VT == MVT::i8) {
@@ -18117,11 +19147,22 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, const X86Subtarget *Subtarget,
Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
}
- // Issue a bsr (scan bits in reverse).
+ // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
- // And xor with NumBits-1.
+ if (Opc == ISD::CTLZ) {
+ // If src is zero (i.e. bsr sets ZF), returns NumBits.
+ SDValue Ops[] = {
+ Op,
+ DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
+ DAG.getConstant(X86::COND_E, dl, MVT::i8),
+ Op.getValue(1)
+ };
+ Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
+ }
+
+ // Finally xor with NumBits-1.
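+ // E.g. for an i32 input 0x00010000, BSR returns 16 and 16 ^ 31 = 15 = ctlz.
+ // For a zero ISD::CTLZ input, the CMOV above selects 2*32-1 = 63, and
+ // 63 ^ 31 = 32 = NumBits.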
Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
DAG.getConstant(NumBits - 1, dl, OpVT));
@@ -18136,8 +19177,6 @@ static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
SDLoc dl(Op);
if (VT.isVector()) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-
SDValue N0 = Op.getOperand(0);
SDValue Zero = DAG.getConstant(0, dl, VT);
@@ -18146,8 +19185,7 @@ static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::SUB, dl, VT, Zero, N0));
// cttz_undef(x) = (width - 1) - ctlz(lsb)
- if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF &&
- TLI.isOperationLegal(ISD::CTLZ, VT)) {
+ if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
SDValue WidthMinusOne = DAG.getConstant(NumBits - 1, dl, VT);
return DAG.getNode(ISD::SUB, dl, VT, WidthMinusOne,
DAG.getNode(ISD::CTLZ, dl, VT, LSB));
@@ -18176,8 +19214,8 @@ static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
-// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
-// ones, and then concatenate the result back.
+/// Break a 256-bit integer operation into two new 128-bit ones and then
+/// concatenate the result back.
static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
@@ -18189,13 +19227,42 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
- SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
- SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
+ SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
- SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
- SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
+ SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
+ SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
+
+ MVT EltVT = VT.getVectorElementType();
+ MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
+ DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
+ DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
+}
+
+/// Break a 512-bit integer operation into two new 256-bit ones and then
+/// concatenate the result back.
+static SDValue Lower512IntArith(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+
+ assert(VT.is512BitVector() && VT.isInteger() &&
+ "Unsupported value type for operation");
+
+ unsigned NumElems = VT.getVectorNumElements();
+ SDLoc dl(Op);
+
+ // Extract the LHS vectors
+ SDValue LHS = Op.getOperand(0);
+ SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
+
+ // Extract the RHS vectors
+ SDValue RHS = Op.getOperand(1);
+ SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
+ SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
MVT EltVT = VT.getVectorElementType();
MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
@@ -18232,7 +19299,7 @@ static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
return Lower256IntArith(Op, DAG);
}
-static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
@@ -18241,28 +19308,26 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.is256BitVector() && !Subtarget->hasInt256())
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntArith(Op, DAG);
SDValue A = Op.getOperand(0);
SDValue B = Op.getOperand(1);
- // Lower v16i8/v32i8 mul as promotion to v8i16/v16i16 vector
- // pairs, multiply and truncate.
- if (VT == MVT::v16i8 || VT == MVT::v32i8) {
- if (Subtarget->hasInt256()) {
- if (VT == MVT::v32i8) {
- MVT SubVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() / 2);
- SDValue Lo = DAG.getIntPtrConstant(0, dl);
- SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl);
- SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Lo);
- SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Lo);
- SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Hi);
- SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Hi);
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
- DAG.getNode(ISD::MUL, dl, SubVT, ALo, BLo),
- DAG.getNode(ISD::MUL, dl, SubVT, AHi, BHi));
- }
+ // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
+ // vector pairs, multiply and truncate.
+ if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
+ if (Subtarget.hasInt256()) {
+ // For 512-bit vectors, split into 256-bit vectors to allow the
+ // sign-extension to occur.
+ if (VT == MVT::v64i8)
+ return Lower512IntArith(Op, DAG);
+
+ // For 256-bit vectors, split into 128-bit vectors to allow the
+ // sign-extension to occur. We don't need this on AVX512BW as we can
+ // safely sign-extend to v32i16.
+ if (VT == MVT::v32i8 && !Subtarget.hasBWI())
+ return Lower256IntArith(Op, DAG);
MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
return DAG.getNode(
@@ -18278,7 +19343,7 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
// Extract the lo parts and sign extend to i16
SDValue ALo, BLo;
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
ALo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, A);
BLo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, B);
} else {
@@ -18294,7 +19359,7 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
// Extract the hi parts and sign extend to i16
SDValue AHi, BHi;
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
-1, -1, -1, -1, -1, -1, -1, -1};
AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
@@ -18322,7 +19387,7 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
// Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
if (VT == MVT::v4i32) {
- assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
+ assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
"Should not custom lower when pmuldq is available!");
// Extract the odd parts.
@@ -18386,8 +19451,122 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
}
+static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc dl(Op);
+ MVT VT = Op.getSimpleValueType();
+
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntArith(Op, DAG);
+
+ // Only i8 vectors should need custom lowering after this.
+ assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
+ "Unsupported vector type");
+
+ // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
+ // logical shift down the upper half and pack back to i8.
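+ // E.g. for MULHU of the i8 values 200 and 100 the i16 product is
+ // 200 * 100 = 20000 = 0x4E20, and shifting down by 8 leaves 0x4E (78).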
+ SDValue A = Op.getOperand(0);
+ SDValue B = Op.getOperand(1);
+
+ // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
+ // and then ashr/lshr the upper bits down to the lower bits before multiply.
+ unsigned Opcode = Op.getOpcode();
+ unsigned ExShift = (ISD::MULHU == Opcode ? ISD::SRL : ISD::SRA);
+ unsigned ExSSE41 = (ISD::MULHU == Opcode ? X86ISD::VZEXT : X86ISD::VSEXT);
+
+ // AVX2 implementations - extend xmm subvectors to ymm.
+ if (Subtarget.hasInt256()) {
+ SDValue Lo = DAG.getIntPtrConstant(0, dl);
+ SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl);
+
+ if (VT == MVT::v32i8) {
+ SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Lo);
+ SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Lo);
+ SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Hi);
+ SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Hi);
+ ALo = DAG.getNode(ExSSE41, dl, MVT::v16i16, ALo);
+ BLo = DAG.getNode(ExSSE41, dl, MVT::v16i16, BLo);
+ AHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, AHi);
+ BHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, BHi);
+ Lo = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
+ DAG.getNode(ISD::MUL, dl, MVT::v16i16, ALo, BLo),
+ DAG.getConstant(8, dl, MVT::v16i16));
+ Hi = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
+ DAG.getNode(ISD::MUL, dl, MVT::v16i16, AHi, BHi),
+ DAG.getConstant(8, dl, MVT::v16i16));
+ // The ymm variant of PACKUS treats the 128-bit lanes separately, so before
+ // using PACKUS we need to permute the inputs to the correct lo/hi xmm lane.
+ const int LoMask[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 16, 17, 18, 19, 20, 21, 22, 23};
+ const int HiMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
+ 24, 25, 26, 27, 28, 29, 30, 31};
+ return DAG.getNode(X86ISD::PACKUS, dl, VT,
+ DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, LoMask),
+ DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, HiMask));
+ }
+
+ SDValue ExA = DAG.getNode(ExSSE41, dl, MVT::v16i16, A);
+ SDValue ExB = DAG.getNode(ExSSE41, dl, MVT::v16i16, B);
+ SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v16i16, ExA, ExB);
+ SDValue MulH = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul,
+ DAG.getConstant(8, dl, MVT::v16i16));
+ Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Lo);
+ Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Hi);
+ return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
+ }
+
+ assert(VT == MVT::v16i8 &&
+ "Pre-AVX2 support only supports v16i8 multiplication");
+ MVT ExVT = MVT::v8i16;
+
+ // Extract the lo parts and zero/sign extend to i16.
+ SDValue ALo, BLo;
+ if (Subtarget.hasSSE41()) {
+ ALo = DAG.getNode(ExSSE41, dl, ExVT, A);
+ BLo = DAG.getNode(ExSSE41, dl, ExVT, B);
+ } else {
+ const int ShufMask[] = {-1, 0, -1, 1, -1, 2, -1, 3,
+ -1, 4, -1, 5, -1, 6, -1, 7};
+ ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
+ BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
+ ALo = DAG.getBitcast(ExVT, ALo);
+ BLo = DAG.getBitcast(ExVT, BLo);
+ ALo = DAG.getNode(ExShift, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT));
+ BLo = DAG.getNode(ExShift, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT));
+ }
+
+ // Extract the hi parts and zero/sign extend to i16.
+ SDValue AHi, BHi;
+ if (Subtarget.hasSSE41()) {
+ const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
+ -1, -1, -1, -1, -1, -1, -1, -1};
+ AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
+ BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
+ AHi = DAG.getNode(ExSSE41, dl, ExVT, AHi);
+ BHi = DAG.getNode(ExSSE41, dl, ExVT, BHi);
+ } else {
+ const int ShufMask[] = {-1, 8, -1, 9, -1, 10, -1, 11,
+ -1, 12, -1, 13, -1, 14, -1, 15};
+ AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
+ BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
+ AHi = DAG.getBitcast(ExVT, AHi);
+ BHi = DAG.getBitcast(ExVT, BHi);
+ AHi = DAG.getNode(ExShift, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT));
+ BHi = DAG.getNode(ExShift, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT));
+ }
+
+ // Multiply, then logically shift the upper 8 bits of each lo/hi result down
+ // to the lower 8 bits, and pack back to v16i8.
+ SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
+ SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
+ RLo = DAG.getNode(ISD::SRL, dl, ExVT, RLo, DAG.getConstant(8, dl, ExVT));
+ RHi = DAG.getNode(ISD::SRL, dl, ExVT, RHi, DAG.getConstant(8, dl, ExVT));
+ return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
+}
+
SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
- assert(Subtarget->isTargetWin64() && "Unexpected target");
+ assert(Subtarget.isTargetWin64() && "Unexpected target");
EVT VT = Op.getValueType();
assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
"Unexpected return type for lowering");
@@ -18415,8 +19594,8 @@ SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) cons
"Unexpected argument type for lowering");
SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
Entry.Node = StackPtr;
- InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
- false, false, 16);
+ InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
+ MachinePointerInfo(), /* Alignment = */ 16);
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Ty = PointerType::get(ArgTy,0);
Entry.isSExt = false;
@@ -18431,21 +19610,39 @@ SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) cons
CLI.setDebugLoc(dl).setChain(InChain)
.setCallee(getLibcallCallingConv(LC),
static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
- Callee, std::move(Args), 0)
+ Callee, std::move(Args))
.setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
return DAG.getBitcast(VT, CallInfo.first);
}
-static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
MVT VT = Op0.getSimpleValueType();
SDLoc dl(Op);
- assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
- (VT == MVT::v8i32 && Subtarget->hasInt256()));
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector() && !Subtarget.hasInt256()) {
+ unsigned Opcode = Op.getOpcode();
+ unsigned NumElems = VT.getVectorNumElements();
+ MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), NumElems / 2);
+ SDValue Lo0 = extract128BitVector(Op0, 0, DAG, dl);
+ SDValue Lo1 = extract128BitVector(Op1, 0, DAG, dl);
+ SDValue Hi0 = extract128BitVector(Op0, NumElems / 2, DAG, dl);
+ SDValue Hi1 = extract128BitVector(Op1, NumElems / 2, DAG, dl);
+ SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Lo0, Lo1);
+ SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Hi0, Hi1);
+ SDValue Ops[] = {
+ DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(0), Hi.getValue(0)),
+ DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(1), Hi.getValue(1))
+ };
+ return DAG.getMergeValues(Ops, dl);
+ }
+
+ assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
+ (VT == MVT::v8i32 && Subtarget.hasInt256()));
// PMULxD operations multiply each even value (starting at 0) of LHS with
  // the related value of RHS and produce a widened result.
@@ -18461,16 +19658,18 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
// step to the left):
const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
// <a|b|c|d> => <b|undef|d|undef>
- SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
+ SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0,
+ makeArrayRef(&Mask[0], VT.getVectorNumElements()));
// <e|f|g|h> => <f|undef|h|undef>
- SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
+ SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1,
+ makeArrayRef(&Mask[0], VT.getVectorNumElements()));
// Emit two multiplies, one for the lower 2 ints and one for the higher 2
// ints.
MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
unsigned Opcode =
- (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
+ (!IsSigned || !Subtarget.hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
// PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
// => <2 x i64> <ae|cg>
SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
@@ -18494,7 +19693,7 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
// If we have a signed multiply but no PMULDQ fix up the high parts of a
// unsigned multiply.
- if (IsSigned && !Subtarget->hasSSE41()) {
+ if (IsSigned && !Subtarget.hasSSE41()) {
SDValue ShAmt = DAG.getConstant(
31, dl,
DAG.getTargetLoweringInfo().getShiftAmountTy(VT, DAG.getDataLayout()));
@@ -18515,19 +19714,19 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
// Return true if the required (according to Opcode) shift-imm form is natively
// supported by the Subtarget
-static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget *Subtarget,
+static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
if (VT.getScalarSizeInBits() < 16)
return false;
if (VT.is512BitVector() &&
- (VT.getScalarSizeInBits() > 16 || Subtarget->hasBWI()))
+ (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
return true;
bool LShift = VT.is128BitVector() ||
- (VT.is256BitVector() && Subtarget->hasInt256());
+ (VT.is256BitVector() && Subtarget.hasInt256());
- bool AShift = LShift && (Subtarget->hasVLX() ||
+ bool AShift = LShift && (Subtarget.hasVLX() ||
(VT != MVT::v2i64 && VT != MVT::v4i64));
return (Opcode == ISD::SRA) ? AShift : LShift;
}
@@ -18535,24 +19734,24 @@ static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget *Subtarget,
// The shift amount is a variable, but it is the same for all vector lanes.
// These instructions are defined together with shift-immediate.
static
-bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget *Subtarget,
+bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
}
// Return true if the required (according to Opcode) variable-shift form is
// natively supported by the Subtarget
-static bool SupportedVectorVarShift(MVT VT, const X86Subtarget *Subtarget,
+static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
- if (!Subtarget->hasInt256() || VT.getScalarSizeInBits() < 16)
+ if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
return false;
// vXi16 supported only on AVX-512, BWI
- if (VT.getScalarSizeInBits() == 16 && !Subtarget->hasBWI())
+ if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
return false;
- if (VT.is512BitVector() || Subtarget->hasVLX())
+ if (VT.is512BitVector() || Subtarget.hasVLX())
return true;
bool LShift = VT.is128BitVector() || VT.is256BitVector();
@@ -18561,7 +19760,7 @@ static bool SupportedVectorVarShift(MVT VT, const X86Subtarget *Subtarget,
}
static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
@@ -18611,12 +19810,12 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
// i64 SRA needs to be performed as partial shifts.
- if ((VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
- Op.getOpcode() == ISD::SRA && !Subtarget->hasXOP())
+ if ((VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
+ Op.getOpcode() == ISD::SRA && !Subtarget.hasXOP())
return ArithmeticShiftRight64(ShiftAmt);
if (VT == MVT::v16i8 ||
- (Subtarget->hasInt256() && VT == MVT::v32i8) ||
+ (Subtarget.hasInt256() && VT == MVT::v32i8) ||
VT == MVT::v64i8) {
unsigned NumElts = VT.getVectorNumElements();
MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
@@ -18628,11 +19827,16 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
// ashr(R, 7) === cmp_slt(R, 0)
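  // (for i8, ashr by 7 broadcasts the sign bit: 0x80 -> 0xFF and 0x7F -> 0x00,
  // which is exactly the mask that PCMPGT(Zeros, R) produces).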
if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
+ if (VT.is512BitVector()) {
+ assert(VT == MVT::v64i8 && "Unexpected element type!");
+ SDValue CMP = DAG.getNode(X86ISD::PCMPGTM, dl, MVT::v64i1, Zeros, R);
+ return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
+ }
return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
}
// XOP can shift v16i8 directly instead of as shift v8i16 + mask.
- if (VT == MVT::v16i8 && Subtarget->hasXOP())
+ if (VT == MVT::v16i8 && Subtarget.hasXOP())
return SDValue();
if (Op.getOpcode() == ISD::SHL) {
@@ -18668,8 +19872,8 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
}
// Special case in 32-bit mode, where i64 is expanded into high and low parts.
- if (!Subtarget->is64Bit() && !Subtarget->hasXOP() &&
- (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64))) {
+ if (!Subtarget.is64Bit() && !Subtarget.hasXOP() &&
+ (VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64))) {
// Peek through any splat that was introduced for i64 shift vectorization.
int SplatIndex = -1;
@@ -18726,7 +19930,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget* Subtarget) {
+ const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
@@ -18746,7 +19950,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
// Check if this build_vector node is doing a splat.
// If so, then set BaseShAmt equal to the splat value.
BaseShAmt = BV->getSplatValue();
- if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
+ if (BaseShAmt && BaseShAmt.isUndef())
BaseShAmt = SDValue();
} else {
if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
@@ -18787,7 +19991,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
}
// Special case in 32-bit mode, where i64 is expanded into high and low parts.
- if (!Subtarget->is64Bit() && VT == MVT::v2i64 &&
+ if (!Subtarget.is64Bit() && VT == MVT::v2i64 &&
Amt.getOpcode() == ISD::BITCAST &&
Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
Amt = Amt.getOperand(0);
@@ -18808,15 +20012,16 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
return SDValue();
}
-static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
+static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
+ bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
assert(VT.isVector() && "Custom lowering only for vector shifts!");
- assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
+ assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
return V;
@@ -18829,7 +20034,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// XOP has 128-bit variable logical/arithmetic shifts.
// +ve/-ve Amt = shift left/right.
- if (Subtarget->hasXOP() &&
+ if (Subtarget.hasXOP() &&
(VT == MVT::v2i64 || VT == MVT::v4i32 ||
VT == MVT::v8i16 || VT == MVT::v16i8)) {
if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) {
@@ -18856,7 +20061,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// i64 vector arithmetic shift can be emulated with the transform:
// M = lshr(SIGN_BIT, Amt)
// ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
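  // E.g. for R = 0xF000000000000000 and Amt = 4: lshr(R, 4) gives
  // 0x0F00000000000000 and M = 0x0800000000000000; the xor/sub pair
  // sign-extends the shifted-in bits, giving 0xFF00000000000000.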
- if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget->hasInt256())) &&
+ if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
Op.getOpcode() == ISD::SRA) {
SDValue S = DAG.getConstant(APInt::getSignBit(64), dl, VT);
SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
@@ -18869,10 +20074,9 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// If possible, lower this packed shift into a vector multiply instead of
// expanding it into a sequence of scalar shifts.
// Do this only if the vector shift count is a constant build_vector.
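  // E.g. shl <4 x i32> R, <1, 2, 3, 4> becomes mul R, <2, 4, 8, 16>.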
- if (Op.getOpcode() == ISD::SHL &&
+ if (ConstantAmt && Op.getOpcode() == ISD::SHL &&
(VT == MVT::v8i16 || VT == MVT::v4i32 ||
- (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
- ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+ (Subtarget.hasInt256() && VT == MVT::v16i16))) {
SmallVector<SDValue, 8> Elts;
MVT SVT = VT.getVectorElementType();
unsigned SVTBits = SVT.getSizeInBits();
@@ -18881,7 +20085,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
for (unsigned i=0; i !=NumElems; ++i) {
SDValue Op = Amt->getOperand(i);
- if (Op->getOpcode() == ISD::UNDEF) {
+ if (Op->isUndef()) {
Elts.push_back(Op);
continue;
}
@@ -18895,7 +20099,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
}
Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
}
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
+ SDValue BV = DAG.getBuildVector(VT, dl, Elts);
return DAG.getNode(ISD::MUL, dl, VT, R, BV);
}
@@ -18922,15 +20126,13 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
// the vector shift into four scalar shifts plus four pairs of vector
// insert/extract.
- if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
- ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+ if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32)) {
unsigned TargetOpcode = X86ISD::MOVSS;
bool CanBeSimplified;
// The splat value for the first packed shift (the 'X' from the example).
SDValue Amt1 = Amt->getOperand(0);
// The splat value for the second packed shift (the 'Y' from the example).
- SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
- Amt->getOperand(2);
+ SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) : Amt->getOperand(2);
// See if it is possible to replace this node with a sequence of
// two shifts followed by a MOVSS/MOVSD
@@ -18991,7 +20193,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
if (VT == MVT::v4i32) {
unsigned Opc = Op.getOpcode();
SDValue Amt0, Amt1, Amt2, Amt3;
- if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+ if (ConstantAmt) {
Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
@@ -19031,14 +20233,14 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
}
if (VT == MVT::v16i8 ||
- (VT == MVT::v32i8 && Subtarget->hasInt256() && !Subtarget->hasXOP())) {
+ (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP())) {
MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
unsigned ShiftOpcode = Op->getOpcode();
auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
V0 = DAG.getBitcast(VT, V0);
V1 = DAG.getBitcast(VT, V1);
Sel = DAG.getBitcast(VT, Sel);
@@ -19141,7 +20343,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// It's worth extending once and using the v8i32 shifts for 16-bit types, but
// the extra overheads to get from v16i8 to v8i32 make the existing SSE
// solution better.
- if (Subtarget->hasInt256() && VT == MVT::v8i16) {
+ if (Subtarget.hasInt256() && VT == MVT::v8i16) {
MVT ExtVT = MVT::v8i32;
unsigned ExtOpc =
Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
@@ -19151,13 +20353,13 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
DAG.getNode(Op.getOpcode(), dl, ExtVT, R, Amt));
}
- if (Subtarget->hasInt256() && !Subtarget->hasXOP() && VT == MVT::v16i16) {
+ if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
MVT ExtVT = MVT::v8i32;
SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Amt, Z);
SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Amt, Z);
- SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, R, R);
- SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, R, R);
+ SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Z, R);
+ SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Z, R);
ALo = DAG.getBitcast(ExtVT, ALo);
AHi = DAG.getBitcast(ExtVT, AHi);
RLo = DAG.getBitcast(ExtVT, RLo);
@@ -19172,10 +20374,15 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
if (VT == MVT::v8i16) {
unsigned ShiftOpcode = Op->getOpcode();
+ // If we have a constant shift amount, the non-SSE41 path is best as
+ // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
+ bool UseSSE41 = Subtarget.hasSSE41() &&
+ !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
+
auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
- if (Subtarget->hasSSE41()) {
+ if (UseSSE41) {
MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
V0 = DAG.getBitcast(ExtVT, V0);
V1 = DAG.getBitcast(ExtVT, V1);
@@ -19192,7 +20399,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 12;
- if (Subtarget->hasSSE41()) {
+ if (UseSSE41) {
// On SSE41 targets we need to replicate the shift mask in both
// bytes for PBLENDVB.
Amt = DAG.getNode(
@@ -19231,43 +20438,13 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
}
// Decompose 256-bit shifts into smaller 128-bit shifts.
- if (VT.is256BitVector()) {
- unsigned NumElems = VT.getVectorNumElements();
- MVT EltVT = VT.getVectorElementType();
- MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
-
- // Extract the two vectors
- SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
- SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
-
- // Recreate the shift amount vectors
- SDValue Amt1, Amt2;
- if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
- // Constant shift amount
- SmallVector<SDValue, 8> Ops(Amt->op_begin(), Amt->op_begin() + NumElems);
- ArrayRef<SDValue> Amt1Csts = makeArrayRef(Ops).slice(0, NumElems / 2);
- ArrayRef<SDValue> Amt2Csts = makeArrayRef(Ops).slice(NumElems / 2);
-
- Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
- Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
- } else {
- // Variable shift amount
- Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
- Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
- }
-
- // Issue new vector shifts for the smaller types
- V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
- V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
-
- // Concatenate the result back
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
- }
+ if (VT.is256BitVector())
+ return Lower256IntArith(Op, DAG);
return SDValue();
}
-static SDValue LowerRotate(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
@@ -19275,7 +20452,7 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget *Subtarget,
SDValue Amt = Op.getOperand(1);
assert(VT.isVector() && "Custom lowering only for vector rotates!");
- assert(Subtarget->hasXOP() && "XOP support required for vector rotates!");
+ assert(Subtarget.hasXOP() && "XOP support required for vector rotates!");
assert((Op.getOpcode() == ISD::ROTL) && "Only ROTL supported");
// XOP has 128-bit vector variable + immediate rotates.
@@ -19363,6 +20540,11 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(X86::COND_O, DL, MVT::i32),
SDValue(Sum.getNode(), 2));
+ if (N->getValueType(1) == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, DL, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
+ SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+ }
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
}
@@ -19372,10 +20554,15 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
SDValue SetCC =
- DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
+ DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
DAG.getConstant(Cond, DL, MVT::i32),
SDValue(Sum.getNode(), 1));
+ if (N->getValueType(1) == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, DL, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
+ SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+ }
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
@@ -19387,9 +20574,9 @@ bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
unsigned OpWidth = MemType->getPrimitiveSizeInBits();
if (OpWidth == 64)
- return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
+ return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
else if (OpWidth == 128)
- return Subtarget->hasCmpxchg16b();
+ return Subtarget.hasCmpxchg16b();
else
return false;
}
@@ -19409,7 +20596,7 @@ X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
- unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
+ unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
Type *MemType = AI->getType();
// If the operand is too big, we must see if cmpxchg8/16b is available
@@ -19446,16 +20633,9 @@ X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
}
}
-static bool hasMFENCE(const X86Subtarget& Subtarget) {
- // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
- // no-sse2). There isn't any reason to disable it if the target processor
- // supports it.
- return Subtarget.hasSSE2() || Subtarget.is64Bit();
-}
-
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
- unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
+ unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
Type *MemType = AI->getType();
// Accesses larger than the native width are turned into cmpxchg/libcalls, so
// there is no benefit in turning such RMWs into loads, and it is actually
@@ -19483,7 +20663,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
// lowered to just a load without a fence. A mfence flushes the store buffer,
// making the optimization clearly correct.
- // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
+ // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
// otherwise, we might be able to be more aggressive on relaxed idempotent
// rmw. In practice, they do not look useful, so we don't try to be
// especially clever.
@@ -19492,7 +20672,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// the IR level, so we must wrap it in an intrinsic.
return nullptr;
- if (!hasMFENCE(*Subtarget))
+ if (!Subtarget.hasMFence())
// FIXME: it might make sense to use a locked operation here but on a
// different cache-line to prevent cache-line bouncing. In practice it
// is probably a small win, and x86 processors without mfence are rare
@@ -19512,7 +20692,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
return Loaded;
}
-static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
@@ -19522,8 +20702,9 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
- if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
- if (hasMFENCE(*Subtarget))
+ if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+ FenceScope == CrossThread) {
+ if (Subtarget.hasMFence())
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
SDValue Chain = Op.getOperand(0);
@@ -19545,7 +20726,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}
-static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT T = Op.getSimpleValueType();
SDLoc DL(Op);
@@ -19557,7 +20738,7 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
case MVT::i16: Reg = X86::AX; size = 2; break;
case MVT::i32: Reg = X86::EAX; size = 4; break;
case MVT::i64:
- assert(Subtarget->is64Bit() && "Node not type legal!");
+ assert(Subtarget.is64Bit() && "Node not type legal!");
Reg = X86::RAX; size = 8;
break;
}
@@ -19587,14 +20768,14 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
return SDValue();
}
-static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT SrcVT = Op.getOperand(0).getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
SrcVT == MVT::i64) {
- assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
if (DstVT != MVT::f64)
// This conversion needs to be expanded.
return SDValue();
@@ -19614,7 +20795,7 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, Op0,
DAG.getIntPtrConstant(i, dl)));
} else {
- assert(SrcVT == MVT::i64 && !Subtarget->is64Bit() &&
+ assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
"Unexpected source type in LowerBITCAST");
Elts.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op0,
DAG.getIntPtrConstant(0, dl)));
@@ -19627,14 +20808,14 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
Elts.append(NumElts, DAG.getUNDEF(SVT));
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
+ SDValue BV = DAG.getBuildVector(NewVT, dl, Elts);
SDValue ToV2F64 = DAG.getBitcast(MVT::v2f64, BV);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
DAG.getIntPtrConstant(0, dl));
}
- assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
- Subtarget->hasMMX() && "Unexpected custom BITCAST");
+ assert(Subtarget.is64Bit() && !Subtarget.hasSSE2() &&
+ Subtarget.hasMMX() && "Unexpected custom BITCAST");
assert((DstVT == MVT::i64 ||
(DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
"Unexpected custom BITCAST");
@@ -19657,12 +20838,11 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
/// how many bytes of V are summed horizontally to produce each element of the
/// result.
static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc DL(V);
MVT ByteVecVT = V.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
- int NumElts = VT.getVectorNumElements();
assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
"Expected value to have byte element type.");
assert(EltVT != MVT::i8 &&
@@ -19713,16 +20893,15 @@ static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
// i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
// right by 8. It is important to shift as i16s as i8 vector shift isn't
// directly supported.
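  // E.g. an i16 element holding the byte counts 0x0305: shl 8 gives 0x0500,
  // the i8 add gives 0x0805, and the final i16 srl 8 leaves 0x0008, the
  // combined count of both bytes.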
- SmallVector<SDValue, 16> Shifters(NumElts, DAG.getConstant(8, DL, EltVT));
- SDValue Shifter = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Shifters);
- SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), Shifter);
+ SDValue ShifterV = DAG.getConstant(8, DL, VT);
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
DAG.getBitcast(ByteVecVT, V));
- return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), Shifter);
+ return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
}
-static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, SDLoc DL,
- const X86Subtarget *Subtarget,
+static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
@@ -19750,17 +20929,14 @@ static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, SDLoc DL,
int NumByteElts = VecSize / 8;
MVT ByteVecVT = MVT::getVectorVT(MVT::i8, NumByteElts);
SDValue In = DAG.getBitcast(ByteVecVT, Op);
- SmallVector<SDValue, 16> LUTVec;
+ SmallVector<SDValue, 64> LUTVec;
for (int i = 0; i < NumByteElts; ++i)
LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
- SDValue InRegLUT = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, LUTVec);
- SmallVector<SDValue, 16> Mask0F(NumByteElts,
- DAG.getConstant(0x0F, DL, MVT::i8));
- SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, Mask0F);
+ SDValue InRegLUT = DAG.getBuildVector(ByteVecVT, DL, LUTVec);
+ SDValue M0F = DAG.getConstant(0x0F, DL, ByteVecVT);
// High nibbles
- SmallVector<SDValue, 16> Four(NumByteElts, DAG.getConstant(4, DL, MVT::i8));
- SDValue FourV = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, Four);
+ SDValue FourV = DAG.getConstant(4, DL, ByteVecVT);
SDValue HighNibbles = DAG.getNode(ISD::SRL, DL, ByteVecVT, In, FourV);
// Low nibbles
@@ -19781,8 +20957,8 @@ static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, SDLoc DL,
return LowerHorizontalByteSum(PopCnt, VT, Subtarget, DAG);
}
-static SDValue LowerVectorCTPOPBitmath(SDValue Op, SDLoc DL,
- const X86Subtarget *Subtarget,
+static SDValue LowerVectorCTPOPBitmath(SDValue Op, const SDLoc &DL,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.is128BitVector() &&
@@ -19801,19 +20977,13 @@ static SDValue LowerVectorCTPOPBitmath(SDValue Op, SDLoc DL,
auto GetShift = [&](unsigned OpCode, SDValue V, int Shifter) {
MVT VT = V.getSimpleValueType();
- SmallVector<SDValue, 32> Shifters(
- VT.getVectorNumElements(),
- DAG.getConstant(Shifter, DL, VT.getVectorElementType()));
- return DAG.getNode(OpCode, DL, VT, V,
- DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Shifters));
+ SDValue ShifterV = DAG.getConstant(Shifter, DL, VT);
+ return DAG.getNode(OpCode, DL, VT, V, ShifterV);
};
auto GetMask = [&](SDValue V, APInt Mask) {
MVT VT = V.getSimpleValueType();
- SmallVector<SDValue, 32> Masks(
- VT.getVectorNumElements(),
- DAG.getConstant(Mask, DL, VT.getVectorElementType()));
- return DAG.getNode(ISD::AND, DL, VT, V,
- DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Masks));
+ SDValue MaskV = DAG.getConstant(Mask, DL, VT);
+ return DAG.getNode(ISD::AND, DL, VT, V, MaskV);
};
// We don't want to incur the implicit masks required to SRL vNi8 vectors on
@@ -19852,27 +21022,38 @@ static SDValue LowerVectorCTPOPBitmath(SDValue Op, SDLoc DL,
DAG);
}
-static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
- // FIXME: Need to add AVX-512 support here!
- assert((VT.is256BitVector() || VT.is128BitVector()) &&
+ assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
"Unknown CTPOP type to handle");
SDLoc DL(Op.getNode());
SDValue Op0 = Op.getOperand(0);
- if (!Subtarget->hasSSSE3()) {
+ if (!Subtarget.hasSSSE3()) {
// We can't use the fast LUT approach, so fall back on vectorized bitmath.
assert(VT.is128BitVector() && "Only 128-bit vectors supported in SSE!");
return LowerVectorCTPOPBitmath(Op0, DL, Subtarget, DAG);
}
- if (VT.is256BitVector() && !Subtarget->hasInt256()) {
+ if (VT.is256BitVector() && !Subtarget.hasInt256()) {
unsigned NumElems = VT.getVectorNumElements();
// Extract each 128-bit vector, compute pop count and concat the result.
- SDValue LHS = Extract128BitVector(Op0, 0, DAG, DL);
- SDValue RHS = Extract128BitVector(Op0, NumElems/2, DAG, DL);
+ SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
+ SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
+ LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
+ LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
+ }
+
+ if (VT.is512BitVector() && !Subtarget.hasBWI()) {
+ unsigned NumElems = VT.getVectorNumElements();
+
+ // Extract each 256-bit vector, compute pop count and concat the result.
+ SDValue LHS = extract256BitVector(Op0, 0, DAG, DL);
+ SDValue RHS = extract256BitVector(Op0, NumElems / 2, DAG, DL);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
@@ -19882,26 +21063,184 @@ static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget *Subtarget,
return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
}
-static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getSimpleValueType().isVector() &&
"We only do custom lowering for vector population count.");
return LowerVectorCTPOP(Op, Subtarget, DAG);
}
-static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
- SDNode *Node = Op.getNode();
- SDLoc dl(Node);
- EVT T = Node->getValueType(0);
- SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
- DAG.getConstant(0, dl, T), Node->getOperand(2));
- return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
- cast<AtomicSDNode>(Node)->getMemoryVT(),
- Node->getOperand(0),
- Node->getOperand(1), negOp,
- cast<AtomicSDNode>(Node)->getMemOperand(),
- cast<AtomicSDNode>(Node)->getOrdering(),
- cast<AtomicSDNode>(Node)->getSynchScope());
+static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+ SDValue In = Op.getOperand(0);
+ SDLoc DL(Op);
+
+ // For scalars, it's still beneficial to transfer to/from the SIMD unit to
+ // perform the BITREVERSE.
+ if (!VT.isVector()) {
+ MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
+ SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
+ Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
+ DAG.getIntPtrConstant(0, DL));
+ }
+
+ MVT SVT = VT.getVectorElementType();
+ int NumElts = VT.getVectorNumElements();
+ int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
+
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector()) {
+ SDValue Lo = extract128BitVector(In, 0, DAG, DL);
+ SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
+
+ MVT HalfVT = MVT::getVectorVT(SVT, NumElts / 2);
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
+ DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo),
+ DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi));
+ }
+
+ assert(VT.is128BitVector() &&
+ "Only 128-bit vector bitreverse lowering supported.");
+
+ // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
+ // perform the BSWAP in the shuffle.
+ // It's best to shuffle using the second operand as this will implicitly allow
+ // memory folding for multiple vectors.
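+ // E.g. for a v4i32 input, result byte 0 gets the control byte
+ // 19 | (2 << 5) = 0x53: source byte 19 is the MSB of the first i32 of the
+ // second operand (selector values 16-31 pick from the second source), and
+ // op 2 bit-reverses that byte.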
+ SmallVector<SDValue, 16> MaskElts;
+ for (int i = 0; i != NumElts; ++i) {
+ for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
+ int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
+ int PermuteByte = SourceByte | (2 << 5);
+ MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
+ }
+ }
+
+ SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
+ SDValue Res = DAG.getBitcast(MVT::v16i8, In);
+ Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
+ Res, Mask);
+ return DAG.getBitcast(VT, Res);
+}
+
+static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ if (Subtarget.hasXOP())
+ return LowerBITREVERSE_XOP(Op, DAG);
+
+ assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
+
+ MVT VT = Op.getSimpleValueType();
+ SDValue In = Op.getOperand(0);
+ SDLoc DL(Op);
+
+ unsigned NumElts = VT.getVectorNumElements();
+ assert(VT.getScalarType() == MVT::i8 &&
+ "Only byte vector BITREVERSE supported");
+
+ // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
+ if (VT.is256BitVector() && !Subtarget.hasInt256()) {
+ MVT HalfVT = MVT::getVectorVT(MVT::i8, NumElts / 2);
+ SDValue Lo = extract128BitVector(In, 0, DAG, DL);
+ SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
+ Lo = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo);
+ Hi = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi);
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
+ }
+
+ // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
+ // two nibbles, and a PSHUFB lookup finds the bitreverse of each
+ // 0-15 value (moved to the other nibble).
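+ //
+ // E.g. for the byte 0x1A: the lo nibble 0xA maps to 0x50 via LoLUT, the hi
+ // nibble 0x1 maps to 0x08 via HiLUT, and OR'ing them gives 0x58, the
+ // bit-reverse of 0x1A.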
+ SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
+ SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
+ SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
+
+ const int LoLUT[16] = {
+ /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
+ /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
+ /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
+ /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
+ const int HiLUT[16] = {
+ /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
+ /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
+ /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
+ /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
+
+ SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
+ HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
+ }
+
+ SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
+ SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
+ Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
+ Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
+ return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
+}
+
+static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG) {
+ unsigned NewOpc = 0;
+ switch (N->getOpcode()) {
+ case ISD::ATOMIC_LOAD_ADD:
+ NewOpc = X86ISD::LADD;
+ break;
+ case ISD::ATOMIC_LOAD_SUB:
+ NewOpc = X86ISD::LSUB;
+ break;
+ case ISD::ATOMIC_LOAD_OR:
+ NewOpc = X86ISD::LOR;
+ break;
+ case ISD::ATOMIC_LOAD_XOR:
+ NewOpc = X86ISD::LXOR;
+ break;
+ case ISD::ATOMIC_LOAD_AND:
+ NewOpc = X86ISD::LAND;
+ break;
+ default:
+ llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
+ }
+
+ MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
+ return DAG.getMemIntrinsicNode(
+ NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
+ {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
+ /*MemVT=*/N->getSimpleValueType(0), MMO);
+}
+
+/// Lower atomic_load_ops into LOCK-prefixed operations.
+static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ SDValue Chain = N->getOperand(0);
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ unsigned Opc = N->getOpcode();
+ MVT VT = N->getSimpleValueType(0);
+ SDLoc DL(N);
+
+ // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
+ // can only be lowered when the result is unused. They should have already
+ // been transformed into a cmpxchg loop in AtomicExpand.
+ if (N->hasAnyUseOfValue(0)) {
+ // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
+ // select LXADD if LOCK_SUB can't be selected.
+ if (Opc == ISD::ATOMIC_LOAD_SUB) {
+ AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
+ RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
+ return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
+ RHS, AN->getMemOperand(), AN->getOrdering(),
+ AN->getSynchScope());
+ }
+ assert(Opc == ISD::ATOMIC_LOAD_ADD &&
+ "Used AtomicRMW ops other than Add should have been expanded!");
+ return N;
+ }
+
+ SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG);
+ // RAUW the chain, but don't worry about the result, as it's unused.
+ assert(!N->hasAnyUseOfValue(0));
+ DAG.ReplaceAllUsesOfValueWith(N.getValue(1), LockOp.getValue(1));
+ return SDValue();
}
static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
@@ -19914,7 +21253,8 @@ static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
// FIXME: On 32-bit, store -> fist or movq would be more efficient
// (The only way to get a 16-byte store is cmpxchg16b)
// FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
- if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
+ if (cast<AtomicSDNode>(Node)->getOrdering() ==
+ AtomicOrdering::SequentiallyConsistent ||
!DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
cast<AtomicSDNode>(Node)->getMemoryVT(),
@@ -19955,9 +21295,9 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
Op.getOperand(1), Op.getOperand(2));
}
-static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
+ assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
// For MacOSX, we want to call an alternative entry point: __sincos_stret,
// which returns the values as { float, float } (in XMM0) or
@@ -19991,7 +21331,7 @@ static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
- .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
+ .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
@@ -20051,7 +21391,7 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
DAG.getUNDEF(EltVT);
for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
Ops.push_back(FillVal);
- return DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Ops);
+ return DAG.getBuildVector(NVT, dl, Ops);
}
SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
DAG.getUNDEF(NVT);
@@ -20059,9 +21399,9 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
InOp, DAG.getIntPtrConstant(0, dl));
}
-static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(Subtarget->hasAVX512() &&
+ assert(Subtarget.hasAVX512() &&
"MGATHER/MSCATTER are supported on AVX-512 arch only");
// X86 scatter kills mask register, so its type should be added to
@@ -20110,7 +21450,7 @@ static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget *Subtarget,
}
unsigned NumElts = VT.getVectorNumElements();
- if (!Subtarget->hasVLX() && !VT.is512BitVector() &&
+ if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
!Index.getSimpleValueType().is512BitVector()) {
    // AVX512F supports only 512-bit vectors. Either the data or the index
    // should be 512 bits wide. If both the index and data are 256-bit, but
@@ -20150,68 +21490,78 @@ static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget *Subtarget,
NewScatter = DAG.getMaskedScatter(VTs, N->getMemoryVT(), dl, Ops,
N->getMemOperand());
DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
- return SDValue(NewScatter.getNode(), 0);
+ return SDValue(NewScatter.getNode(), 1);
}
-static SDValue LowerMLOAD(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
MVT VT = Op.getSimpleValueType();
+ MVT ScalarVT = VT.getScalarType();
SDValue Mask = N->getMask();
SDLoc dl(Op);
- if (Subtarget->hasAVX512() && !Subtarget->hasVLX() &&
- !VT.is512BitVector() && Mask.getValueType() == MVT::v8i1) {
- // This operation is legal for targets with VLX, but without
- // VLX the vector should be widened to 512 bit
- unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
- MVT WideDataVT = MVT::getVectorVT(VT.getScalarType(), NumEltsInWideVec);
- MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
- SDValue Src0 = N->getSrc0();
- Src0 = ExtendToType(Src0, WideDataVT, DAG);
- Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
- SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
- N->getBasePtr(), Mask, Src0,
- N->getMemoryVT(), N->getMemOperand(),
- N->getExtensionType());
-
- SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
- NewLoad.getValue(0),
- DAG.getIntPtrConstant(0, dl));
- SDValue RetOps[] = {Exract, NewLoad.getValue(1)};
- return DAG.getMergeValues(RetOps, dl);
- }
- return Op;
+ assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
+ "Cannot lower masked load op.");
+
+ assert(((ScalarVT == MVT::i32 || ScalarVT == MVT::f32) ||
+ (Subtarget.hasBWI() &&
+ (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
+ "Unsupported masked load op.");
+
+ // This operation is legal for targets with VLX, but without
+ // VLX the vector should be widened to 512 bits.
+ unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
+ MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
+ MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
+ SDValue Src0 = N->getSrc0();
+ Src0 = ExtendToType(Src0, WideDataVT, DAG);
+ Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
+ SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
+ N->getBasePtr(), Mask, Src0,
+ N->getMemoryVT(), N->getMemOperand(),
+ N->getExtensionType());
+
+ SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
+                               NewLoad.getValue(0),
+                               DAG.getIntPtrConstant(0, dl));
+ SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
+ return DAG.getMergeValues(RetOps, dl);
}
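// Editorial sketch (not from the patch): the widening arithmetic used above
// with concrete numbers. A v8i32 masked load on AVX-512 without VLX is widened
// to v16i32 (512 / 32 = 16 lanes) and its v8i1 mask zero-extended to v16i1,
// after which the original 8 lanes are extracted back out. Plain C++.
#include <cassert>

int main() {
  unsigned ScalarSizeInBits = 32;          // e.g. v8i32 elements
  unsigned NumElts = 8;
  unsigned NumEltsInWideVec = 512 / ScalarSizeInBits;
  assert(NumEltsInWideVec == 16);          // widened data type is v16i32
  assert(NumEltsInWideVec >= NumElts);     // mask grows from v8i1 to v16i1
  return 0;
}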
-static SDValue LowerMSTORE(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
SDValue DataToStore = N->getValue();
MVT VT = DataToStore.getSimpleValueType();
+ MVT ScalarVT = VT.getScalarType();
SDValue Mask = N->getMask();
SDLoc dl(Op);
- if (Subtarget->hasAVX512() && !Subtarget->hasVLX() &&
- !VT.is512BitVector() && Mask.getValueType() == MVT::v8i1) {
- // This operation is legal for targets with VLX, but without
- // VLX the vector should be widened to 512 bit
- unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
- MVT WideDataVT = MVT::getVectorVT(VT.getScalarType(), NumEltsInWideVec);
- MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
- DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
- Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
- return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
- Mask, N->getMemoryVT(), N->getMemOperand(),
- N->isTruncatingStore());
- }
- return Op;
+ assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
+ "Cannot lower masked store op.");
+
+ assert(((ScalarVT == MVT::i32 || ScalarVT == MVT::f32) ||
+ (Subtarget.hasBWI() &&
+ (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
+ "Unsupported masked store op.");
+
+ // This operation is legal for targets with VLX, but without
+ // VLX the vector should be widened to 512 bits.
+ unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
+ MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
+ MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
+ DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
+ Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
+ return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
+ Mask, N->getMemoryVT(), N->getMemOperand(),
+ N->isTruncatingStore());
}
-static SDValue LowerMGATHER(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(Subtarget->hasAVX512() &&
+ assert(Subtarget.hasAVX512() &&
"MGATHER/MSCATTER are supported on AVX-512 arch only");
MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
@@ -20226,7 +21576,7 @@ static SDValue LowerMGATHER(SDValue Op, const X86Subtarget *Subtarget,
unsigned NumElts = VT.getVectorNumElements();
assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
- if (!Subtarget->hasVLX() && !VT.is512BitVector() &&
+ if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
!Index.getSimpleValueType().is512BitVector()) {
    // AVX512F supports only 512-bit vectors. Either the data or the index
    // should be 512 bits wide. If both the index and data are 256-bit, but
@@ -20314,8 +21664,7 @@ SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
return NOOP;
}
-/// LowerOperation - Provide custom lowering hooks for some operations.
-///
+/// Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
@@ -20323,8 +21672,13 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
return LowerCMP_SWAP(Op, Subtarget, DAG);
case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
- case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
- case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
+ case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
+ case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
@@ -20377,14 +21731,18 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
+ case ISD::EH_SJLJ_SETUP_DISPATCH:
+ return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
- case ISD::CTLZ: return LowerCTLZ(Op, Subtarget, DAG);
- case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, Subtarget, DAG);
+ case ISD::CTLZ:
+ case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, DAG);
case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
+ case ISD::MULHS:
+ case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
case ISD::UMUL_LOHI:
case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
case ISD::ROTL: return LowerRotate(Op, Subtarget, DAG);
@@ -20417,11 +21775,34 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::GC_TRANSITION_START:
return LowerGC_TRANSITION_START(Op, DAG);
case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
+ case ISD::STORE: return LowerTruncatingStore(Op, Subtarget, DAG);
}
}
-/// ReplaceNodeResults - Replace a node with an illegal result type
-/// with a new node built out of custom code.
+/// Places new result values for the node in Results (their number
+/// and types must exactly match those of the original return values of
+/// the node), or leaves Results empty, which indicates that the node is not
+/// to be custom lowered after all.
+void X86TargetLowering::LowerOperationWrapper(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const {
+ SDValue Res = LowerOperation(SDValue(N, 0), DAG);
+
+ if (!Res.getNode())
+ return;
+
+ assert((N->getNumValues() <= Res->getNumValues()) &&
+ "Lowering returned the wrong number of results!");
+
+ // Place new result values based on the result number of N.
+ // In some cases (e.g. LowerSINT_TO_FP) Res has more result values than the
+ // original node; in that case the extra chain (the last value) is dropped.
+ for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
+ Results.push_back(Res.getValue(I));
+}
+
+/// Replace a node with an illegal result type with a new node built out of
+/// custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const {
@@ -20432,15 +21813,15 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
llvm_unreachable("Do not know how to custom type legalize this operation!");
case X86ISD::AVG: {
// Legalize types for X86ISD::AVG by expanding vectors.
- assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
auto InVT = N->getValueType(0);
auto InVTSize = InVT.getSizeInBits();
const unsigned RegSize =
(InVTSize > 128) ? ((InVTSize > 256) ? 512 : 256) : 128;
- assert((!Subtarget->hasAVX512() || RegSize < 512) &&
+ assert((!Subtarget.hasAVX512() || RegSize < 512) &&
"512-bit vector requires AVX512");
- assert((!Subtarget->hasAVX2() || RegSize < 256) &&
+ assert((!Subtarget.hasAVX2() || RegSize < 256) &&
"256-bit vector requires AVX2");
auto ElemVT = InVT.getVectorElementType();
@@ -20503,24 +21884,22 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
EVT VT = N->getValueType(0);
// Return a load from the stack slot.
if (StackSlot.getNode())
- Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
- MachinePointerInfo(),
- false, false, false, 0));
+ Results.push_back(
+ DAG.getLoad(VT, dl, FIST, StackSlot, MachinePointerInfo()));
else
Results.push_back(FIST);
}
return;
}
case ISD::UINT_TO_FP: {
- assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
if (N->getOperand(0).getValueType() != MVT::v2i32 ||
N->getValueType(0) != MVT::v2f32)
return;
SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
N->getOperand(0));
- SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
- MVT::f64);
- SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
+ SDValue VBias =
+ DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
DAG.getBitcast(MVT::v2i64, VBias));
Or = DAG.getBitcast(MVT::v2f64, Or);
@@ -20588,20 +21967,49 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
DAG.getConstant(0, dl, HalfT));
swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
DAG.getConstant(1, dl, HalfT));
- swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
- Regs64bit ? X86::RBX : X86::EBX,
- swapInL, cpInH.getValue(1));
- swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
- Regs64bit ? X86::RCX : X86::ECX,
- swapInH, swapInL.getValue(1));
- SDValue Ops[] = { swapInH.getValue(0),
- N->getOperand(1),
- swapInH.getValue(1) };
+ swapInH =
+ DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
+ swapInH, cpInH.getValue(1));
+ // If the current function needs the base pointer, RBX,
+ // we shouldn't use cmpxchg directly. The lowering of that
+ // instruction clobbers RBX, and since RBX is then a reserved
+ // register, the register allocator will not make sure its value
+ // is properly saved and restored around this live-range.
+ const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
+ SDValue Result;
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
+ unsigned BasePtr = TRI->getBaseRegister();
MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
- unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
- X86ISD::LCMPXCHG8_DAG;
- SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
+ if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
+ (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
+ // ISel prefers the LCMPXCHG64 variant.
+ // If that assert breaks, it means this is no longer the case,
+ // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
+ // not just EBX. This is a matter of accepting i64 input for that
+ // pseudo, and restoring into the register of the right width
+ // in the expand-pseudo pass. Everything else should just work.
+ assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
+ "Saving only half of the RBX");
+ unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
+ : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
+ SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
+ Regs64bit ? X86::RBX : X86::EBX,
+ HalfT, swapInH.getValue(1));
+ SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
+ RBXSave,
+ /*Glue*/ RBXSave.getValue(2)};
+ Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
+ } else {
+ unsigned Opcode =
+ Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
+ swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
+ Regs64bit ? X86::RBX : X86::EBX, swapInL,
+ swapInH.getValue(1));
+ SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
+ swapInL.getValue(1)};
+ Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
+ }
SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
Regs64bit ? X86::RAX : X86::EAX,
HalfT, Result.getValue(1));
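// Editorial sketch (not from the patch): the source-level construct that
// reaches this path is a 16-byte compare-and-swap, which on x86-64 lowers to
// CMPXCHG16B and ties up RAX:RDX and RBX:RCX -- hence the extra care above
// when RBX is also the base pointer. Whether the compiler emits the
// instruction inline (vs. a libatomic call) depends on the target and -mcx16;
// the type and helper names below are illustrative.
#include <atomic>
#include <cstdint>

struct alignas(16) Pair { uint64_t Lo, Hi; };

bool cas16(std::atomic<Pair> &A, Pair &Expected, Pair Desired) {
  return A.compare_exchange_strong(Expected, Desired);
}

int main() {
  std::atomic<Pair> A(Pair{1, 2});
  Pair Exp{1, 2};
  return cas16(A, Exp, Pair{3, 4}) ? 0 : 1;
}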
@@ -20639,7 +22047,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
break;
}
case ISD::BITCAST: {
- assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
EVT DstVT = N->getValueType(0);
EVT SrcVT = N->getOperand(0)->getValueType(0);
@@ -20666,7 +22074,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
ToVecInt, DAG.getIntPtrConstant(i, dl)));
- Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
+ Results.push_back(DAG.getBuildVector(DstVT, dl, Elts));
}
}
}
@@ -20703,7 +22111,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::SETCC: return "X86ISD::SETCC";
case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
case X86ISD::FSETCC: return "X86ISD::FSETCC";
- case X86ISD::FGETSIGNx86: return "X86ISD::FGETSIGNx86";
case X86ISD::CMOV: return "X86ISD::CMOV";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
@@ -20724,7 +22131,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::MMX_PINSRW: return "X86ISD::MMX_PINSRW";
case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
case X86ISD::ANDNP: return "X86ISD::ANDNP";
- case X86ISD::PSIGN: return "X86ISD::PSIGN";
case X86ISD::BLENDI: return "X86ISD::BLENDI";
case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
case X86ISD::ADDUS: return "X86ISD::ADDUS";
@@ -20742,7 +22148,9 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FMAXC: return "X86ISD::FMAXC";
case X86ISD::FMINC: return "X86ISD::FMINC";
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
+ case X86ISD::FRSQRTS: return "X86ISD::FRSQRTS";
case X86ISD::FRCP: return "X86ISD::FRCP";
+ case X86ISD::FRCPS: return "X86ISD::FRCPS";
case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
@@ -20750,6 +22158,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
+ case X86ISD::EH_SJLJ_SETUP_DISPATCH:
+ return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
@@ -20757,6 +22167,15 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
+ case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
+ return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
+ case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
+ return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
+ case X86ISD::LADD: return "X86ISD::LADD";
+ case X86ISD::LSUB: return "X86ISD::LSUB";
+ case X86ISD::LOR: return "X86ISD::LOR";
+ case X86ISD::LXOR: return "X86ISD::LXOR";
+ case X86ISD::LAND: return "X86ISD::LAND";
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
case X86ISD::VZEXT: return "X86ISD::VZEXT";
@@ -20778,8 +22197,10 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::VSHLI: return "X86ISD::VSHLI";
case X86ISD::VSRLI: return "X86ISD::VSRLI";
case X86ISD::VSRAI: return "X86ISD::VSRAI";
+ case X86ISD::VSRAV: return "X86ISD::VSRAV";
case X86ISD::VROTLI: return "X86ISD::VROTLI";
case X86ISD::VROTRI: return "X86ISD::VROTRI";
+ case X86ISD::VPPERM: return "X86ISD::VPPERM";
case X86ISD::CMPP: return "X86ISD::CMPP";
case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
@@ -20802,6 +22223,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::AND: return "X86ISD::AND";
case X86ISD::BEXTR: return "X86ISD::BEXTR";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
+ case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
case X86ISD::PTEST: return "X86ISD::PTEST";
case X86ISD::TESTP: return "X86ISD::TESTP";
case X86ISD::TESTM: return "X86ISD::TESTM";
@@ -20842,6 +22264,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::VPERMI: return "X86ISD::VPERMI";
case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
+ case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
case X86ISD::VRANGE: return "X86ISD::VRANGE";
case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
@@ -20852,8 +22275,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
case X86ISD::MFENCE: return "X86ISD::MFENCE";
- case X86ISD::SFENCE: return "X86ISD::SFENCE";
- case X86ISD::LFENCE: return "X86ISD::LFENCE";
case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
case X86ISD::SAHF: return "X86ISD::SAHF";
case X86ISD::RDRAND: return "X86ISD::RDRAND";
@@ -20866,6 +22287,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::VPSHL: return "X86ISD::VPSHL";
case X86ISD::VPCOM: return "X86ISD::VPCOM";
case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
+ case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
case X86ISD::FMADD: return "X86ISD::FMADD";
case X86ISD::FMSUB: return "X86ISD::FMSUB";
case X86ISD::FNMADD: return "X86ISD::FNMADD";
@@ -20878,6 +22300,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
+ case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
+ case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
@@ -20898,6 +22322,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
case X86ISD::FGETEXP_RND: return "X86ISD::FGETEXP_RND";
case X86ISD::SCALEF: return "X86ISD::SCALEF";
+ case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
case X86ISD::ADDS: return "X86ISD::ADDS";
case X86ISD::SUBS: return "X86ISD::SUBS";
case X86ISD::AVG: return "X86ISD::AVG";
@@ -20908,26 +22333,27 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FP_TO_UINT_RND: return "X86ISD::FP_TO_UINT_RND";
case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
+ case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
+ case X86ISD::SCALAR_FP_TO_SINT_RND: return "X86ISD::SCALAR_FP_TO_SINT_RND";
+ case X86ISD::SCALAR_FP_TO_UINT_RND: return "X86ISD::SCALAR_FP_TO_UINT_RND";
}
return nullptr;
}
-// isLegalAddressingMode - Return true if the addressing mode represented
-// by AM is legal for this target, for a load/store of the specified type.
+/// Return true if the addressing mode represented by AM is legal for this
+/// target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS) const {
// X86 supports extremely general addressing modes.
CodeModel::Model M = getTargetMachine().getCodeModel();
- Reloc::Model R = getTargetMachine().getRelocationModel();
// X86 allows a sign-extended 32-bit immediate field as a displacement.
if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
return false;
if (AM.BaseGV) {
- unsigned GVFlags =
- Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
+ unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
// If a reference to this global requires an extra load, we can't fold it.
if (isGlobalStubReference(GVFlags))
@@ -20939,8 +22365,8 @@ bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
return false;
// If lower 4G is not available, then we must use rip-relative addressing.
- if ((M != CodeModel::Small || R != Reloc::Static) &&
- Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
+ if ((M != CodeModel::Small || isPositionIndependent()) &&
+ Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
return false;
}
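// Editorial sketch (not from the patch): the displacement rule checked above
// in its simplest form -- an x86 address is Base + Scale*Index + Disp, and the
// displacement must fit in a sign-extended 32-bit immediate. The helper name
// below is illustrative.
#include <cassert>
#include <cstdint>

bool fitsInSExt32(int64_t Disp) {
  return Disp >= INT32_MIN && Disp <= INT32_MAX;
}

int main() {
  assert(fitsInSExt32(-4096) && fitsInSExt32(INT32_MAX));
  assert(!fitsInSExt32(int64_t(1) << 32));   // needs rip-relative or a register
  return 0;
}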
@@ -20977,7 +22403,7 @@ bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
// On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
// variable shifts just as cheap as scalar ones.
- if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
+ if (Subtarget.hasInt256() && (Bits == 32 || Bits == 64))
return false;
// Otherwise, it's significantly cheaper to shift by a scalar amount than by a
@@ -21026,12 +22452,12 @@ bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
- return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
+ return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
}
bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
- return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
+ return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
}
bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
@@ -21062,7 +22488,7 @@ bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
- if (!Subtarget->hasAnyFMA())
+ if (!Subtarget.hasAnyFMA())
return false;
VT = VT.getScalarType();
@@ -21086,8 +22512,8 @@ bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}
-/// isShuffleMaskLegal - Targets can use this to indicate that they only
-/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
+/// Targets can use this to indicate that they only support *some*
+/// VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
@@ -21121,9 +22547,9 @@ X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
//===----------------------------------------------------------------------===//
/// Utility function to emit xbegin specifying the start of an RTM region.
-static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
+static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
const TargetInstrInfo *TII) {
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
@@ -21167,21 +22593,21 @@ static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
// sinkMBB:
// EAX is live into the sinkMBB
sinkMBB->addLiveIn(X86::EAX);
- BuildMI(*sinkMBB, sinkMBB->begin(), DL,
- TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
- .addReg(X86::EAX);
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(TargetOpcode::COPY),
+ MI.getOperand(0).getReg())
+ .addReg(X86::EAX);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return sinkMBB;
}
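// Editorial sketch (not from the patch): what XBEGIN expresses at the source
// level via the RTM intrinsics -- start a transaction, and on abort fall
// through with the abort status in EAX (the value copied out of EAX above).
// Assumes an RTM-capable CPU and compilation with -mrtm; the abort fallback is
// the usual usage pattern.
#include <immintrin.h>

int tryTransactionalIncrement(int &Counter) {
  unsigned Status = _xbegin();
  if (Status == _XBEGIN_STARTED) {   // transaction is running
    ++Counter;
    _xend();                         // commit
    return 0;
  }
  return static_cast<int>(Status);   // abort status, like XBEGIN's result
}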
// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX all of this code can be replaced with that
// in the .td file.
-static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
+static MachineBasicBlock *emitPCMPSTRM(MachineInstr &MI, MachineBasicBlock *BB,
const TargetInstrInfo *TII) {
unsigned Opc;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("illegal opcode!");
case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
@@ -21193,32 +22619,31 @@ static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
}
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
- unsigned NumArgs = MI->getNumOperands();
+ unsigned NumArgs = MI.getNumOperands();
for (unsigned i = 1; i < NumArgs; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ MachineOperand &Op = MI.getOperand(i);
if (!(Op.isReg() && Op.isImplicit()))
MIB.addOperand(Op);
}
- if (MI->hasOneMemOperand())
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ if (MI.hasOneMemOperand())
+ MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- BuildMI(*BB, MI, dl,
- TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
- .addReg(X86::XMM0);
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
+ .addReg(X86::XMM0);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return BB;
}
// FIXME: Custom handling because TableGen doesn't support multiple implicit
// defs in an instruction pattern
-static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
+static MachineBasicBlock *emitPCMPSTRI(MachineInstr &MI, MachineBasicBlock *BB,
const TargetInstrInfo *TII) {
unsigned Opc;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("illegal opcode!");
case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
@@ -21230,93 +22655,90 @@ static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
}
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
- unsigned NumArgs = MI->getNumOperands(); // remove the results
+ unsigned NumArgs = MI.getNumOperands(); // remove the results
for (unsigned i = 1; i < NumArgs; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ MachineOperand &Op = MI.getOperand(i);
if (!(Op.isReg() && Op.isImplicit()))
MIB.addOperand(Op);
}
- if (MI->hasOneMemOperand())
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ if (MI.hasOneMemOperand())
+ MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- BuildMI(*BB, MI, dl,
- TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
- .addReg(X86::ECX);
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
+ .addReg(X86::ECX);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return BB;
}
-static MachineBasicBlock *EmitWRPKRU(MachineInstr *MI, MachineBasicBlock *BB,
- const X86Subtarget *Subtarget) {
- DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+static MachineBasicBlock *emitWRPKRU(MachineInstr &MI, MachineBasicBlock *BB,
+ const X86Subtarget &Subtarget) {
+ DebugLoc dl = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// insert input VAL into EAX
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
- .addReg(MI->getOperand(0).getReg());
+ .addReg(MI.getOperand(0).getReg());
// insert zero to ECX
- BuildMI(*BB, MI, dl, TII->get(X86::XOR32rr), X86::ECX)
- .addReg(X86::ECX)
- .addReg(X86::ECX);
+ BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
+
// insert zero to EDX
- BuildMI(*BB, MI, dl, TII->get(X86::XOR32rr), X86::EDX)
- .addReg(X86::EDX)
- .addReg(X86::EDX);
+ BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::EDX);
+
// insert WRPKRU instruction
BuildMI(*BB, MI, dl, TII->get(X86::WRPKRUr));
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
-static MachineBasicBlock *EmitRDPKRU(MachineInstr *MI, MachineBasicBlock *BB,
- const X86Subtarget *Subtarget) {
- DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+static MachineBasicBlock *emitRDPKRU(MachineInstr &MI, MachineBasicBlock *BB,
+ const X86Subtarget &Subtarget) {
+ DebugLoc dl = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// insert zero to ECX
- BuildMI(*BB, MI, dl, TII->get(X86::XOR32rr), X86::ECX)
- .addReg(X86::ECX)
- .addReg(X86::ECX);
+ BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
+
// insert RDPKRU instruction
BuildMI(*BB, MI, dl, TII->get(X86::RDPKRUr));
- BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
- .addReg(X86::EAX);
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
+ .addReg(X86::EAX);
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
-static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
- const X86Subtarget *Subtarget) {
- DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+static MachineBasicBlock *emitMonitor(MachineInstr &MI, MachineBasicBlock *BB,
+ const X86Subtarget &Subtarget,
+ unsigned Opc) {
+ DebugLoc dl = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// Address into RAX/EAX, other two args into ECX, EDX.
- unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
- unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+ unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r;
+ unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
for (int i = 0; i < X86::AddrNumOperands; ++i)
- MIB.addOperand(MI->getOperand(i));
+ MIB.addOperand(MI.getOperand(i));
unsigned ValOps = X86::AddrNumOperands;
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
- .addReg(MI->getOperand(ValOps).getReg());
+ .addReg(MI.getOperand(ValOps).getReg());
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
- .addReg(MI->getOperand(ValOps+1).getReg());
+ .addReg(MI.getOperand(ValOps + 1).getReg());
// The instruction doesn't actually take any operands though.
- BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
+ BuildMI(*BB, MI, dl, TII->get(Opc));
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
MachineBasicBlock *
-X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
+X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const {
// Emit va_arg instruction on X86-64.
@@ -21328,31 +22750,31 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
// 8 ) Align : Alignment of type
// 9 ) EFLAGS (implicit-def)
- assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
+ assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
static_assert(X86::AddrNumOperands == 5,
"VAARG_64 assumes 5 address operands");
- unsigned DestReg = MI->getOperand(0).getReg();
- MachineOperand &Base = MI->getOperand(1);
- MachineOperand &Scale = MI->getOperand(2);
- MachineOperand &Index = MI->getOperand(3);
- MachineOperand &Disp = MI->getOperand(4);
- MachineOperand &Segment = MI->getOperand(5);
- unsigned ArgSize = MI->getOperand(6).getImm();
- unsigned ArgMode = MI->getOperand(7).getImm();
- unsigned Align = MI->getOperand(8).getImm();
+ unsigned DestReg = MI.getOperand(0).getReg();
+ MachineOperand &Base = MI.getOperand(1);
+ MachineOperand &Scale = MI.getOperand(2);
+ MachineOperand &Index = MI.getOperand(3);
+ MachineOperand &Disp = MI.getOperand(4);
+ MachineOperand &Segment = MI.getOperand(5);
+ unsigned ArgSize = MI.getOperand(6).getImm();
+ unsigned ArgMode = MI.getOperand(7).getImm();
+ unsigned Align = MI.getOperand(8).getImm();
// Memory Reference
- assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+ assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
+ MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
// Machine Information
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
// struct va_list {
// i32 gp_offset
@@ -21521,7 +22943,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
// to OverflowDestReg.
if (NeedsAlign) {
// Align the overflow address
- assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
+ assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
// aligned_addr = (addr + (align-1)) & ~(align-1)
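// Editorial sketch (not from the patch): the rounding-up formula from the
// comment above with concrete numbers, e.g. address 0x1003 aligned to 8 gives
// 0x1008. Plain C++; Align must be a power of two, as the assert requires.
#include <cassert>
#include <cstdint>

uint64_t alignUp(uint64_t Addr, uint64_t Align) {
  return (Addr + (Align - 1)) & ~(Align - 1);
}

int main() {
  assert(alignUp(0x1003, 8) == 0x1008);
  assert(alignUp(0x1008, 8) == 0x1008);   // already aligned: unchanged
  return 0;
}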
@@ -21563,15 +22985,13 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
}
// Erase the pseudo instruction
- MI->eraseFromParent();
+ MI.eraseFromParent();
return endMBB;
}
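// Editorial sketch (not from the patch): the in-memory va_list layout this
// pseudo walks, per the System V x86-64 ABI (gp_offset at +0, fp_offset at +4,
// then the overflow and register-save-area pointers). The struct below is for
// illustration only; the real type is the compiler's built-in va_list.
#include <cstdint>

struct VaList64 {
  uint32_t gp_offset;          // bytes consumed from the GP register save area
  uint32_t fp_offset;          // bytes consumed from the FP register save area
  void    *overflow_arg_area;  // next stack (overflow) argument
  void    *reg_save_area;      // start of the register save area
};

static_assert(sizeof(VaList64) == 24, "matches the 24-byte ABI va_list record");

int main() { return 0; }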
-MachineBasicBlock *
-X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
- MachineInstr *MI,
- MachineBasicBlock *MBB) const {
+MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
+ MachineInstr &MI, MachineBasicBlock *MBB) const {
// Emit code to save XMM registers to the stack. The ABI says that the
// number of registers to save is given in %al, so it's theoretically
// possible to do an indirect jump trick to avoid saving all of them,
@@ -21602,14 +23022,14 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
XMMSaveMBB->addSuccessor(EndMBB);
// Now add the instructions.
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
- unsigned CountReg = MI->getOperand(0).getReg();
- int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
- int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
+ unsigned CountReg = MI.getOperand(0).getReg();
+ int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
+ int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
- if (!Subtarget->isCallingConvWin64(F->getFunction()->getCallingConv())) {
+ if (!Subtarget.isCallingConvWin64(F->getFunction()->getCallingConv())) {
// If %al is 0, branch around the XMM save block.
BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
@@ -21618,29 +23038,29 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
// Make sure the last operand is EFLAGS, which gets clobbered by the branch
// that was just emitted, but clearly shouldn't be "saved".
- assert((MI->getNumOperands() <= 3 ||
- !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
- MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
- && "Expected last argument to be EFLAGS");
- unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
+ assert((MI.getNumOperands() <= 3 ||
+ !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
+ MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
+ "Expected last argument to be EFLAGS");
+ unsigned MOVOpc = Subtarget.hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
// In the XMM save block, save all the XMM argument registers.
- for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
+ for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
MachineMemOperand *MMO = F->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
MachineMemOperand::MOStore,
/*Size=*/16, /*Align=*/16);
BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
- .addFrameIndex(RegSaveFrameIndex)
- .addImm(/*Scale=*/1)
- .addReg(/*IndexReg=*/0)
- .addImm(/*Disp=*/Offset)
- .addReg(/*Segment=*/0)
- .addReg(MI->getOperand(i).getReg())
- .addMemOperand(MMO);
+ .addFrameIndex(RegSaveFrameIndex)
+ .addImm(/*Scale=*/1)
+ .addReg(/*IndexReg=*/0)
+ .addImm(/*Disp=*/Offset)
+ .addReg(/*Segment=*/0)
+ .addReg(MI.getOperand(i).getReg())
+ .addMemOperand(MMO);
}
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return EndMBB;
}
@@ -21684,8 +23104,8 @@ static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
-static bool isCMOVPseudo(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static bool isCMOVPseudo(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
case X86::CMOV_FR32:
case X86::CMOV_FR64:
case X86::CMOV_GR8:
@@ -21715,10 +23135,10 @@ static bool isCMOVPseudo(MachineInstr *MI) {
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
+X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
@@ -21837,8 +23257,8 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// retq
//
MachineInstr *CascadedCMOV = nullptr;
- MachineInstr *LastCMOV = MI;
- X86::CondCode CC = X86::CondCode(MI->getOperand(3).getImm());
+ MachineInstr *LastCMOV = &MI;
+ X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
MachineBasicBlock::iterator NextMIIt =
std::next(MachineBasicBlock::iterator(MI));
@@ -21849,8 +23269,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
if (isCMOVPseudo(MI)) {
// See if we have a string of CMOVS with the same condition.
- while (NextMIIt != BB->end() &&
- isCMOVPseudo(NextMIIt) &&
+ while (NextMIIt != BB->end() && isCMOVPseudo(*NextMIIt) &&
(NextMIIt->getOperand(3).getImm() == CC ||
NextMIIt->getOperand(3).getImm() == OppCC)) {
LastCMOV = &*NextMIIt;
@@ -21860,10 +23279,10 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// This checks for case 2, but only do this if we didn't already find
// case 1, as indicated by LastCMOV == MI.
- if (LastCMOV == MI &&
- NextMIIt != BB->end() && NextMIIt->getOpcode() == MI->getOpcode() &&
- NextMIIt->getOperand(2).getReg() == MI->getOperand(2).getReg() &&
- NextMIIt->getOperand(1).getReg() == MI->getOperand(0).getReg() &&
+ if (LastCMOV == &MI && NextMIIt != BB->end() &&
+ NextMIIt->getOpcode() == MI.getOpcode() &&
+ NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
+ NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
NextMIIt->getOperand(1).isKill()) {
CascadedCMOV = &*NextMIIt;
}
@@ -21885,7 +23304,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
- const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
MachineInstr *LastEFLAGSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
if (!LastEFLAGSUser->killsRegister(X86::EFLAGS) &&
@@ -21976,12 +23395,12 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// If we have a cascaded CMOV, the second Jcc provides the same incoming
// value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
if (CascadedCMOV) {
- MIB.addReg(MI->getOperand(2).getReg()).addMBB(jcc1MBB);
+ MIB.addReg(MI.getOperand(2).getReg()).addMBB(jcc1MBB);
// Copy the PHI result to the register defined by the second CMOV.
BuildMI(*sinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
DL, TII->get(TargetOpcode::COPY),
CascadedCMOV->getOperand(0).getReg())
- .addReg(MI->getOperand(0).getReg());
+ .addReg(MI.getOperand(0).getReg());
CascadedCMOV->eraseFromParent();
}
@@ -21993,7 +23412,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredAtomicFP(MachineInstr *MI,
+X86TargetLowering::EmitLoweredAtomicFP(MachineInstr &MI,
MachineBasicBlock *BB) const {
// Combine the following atomic floating-point modification pattern:
// a.store(reg OP a.load(acquire), release)
@@ -22002,52 +23421,55 @@ X86TargetLowering::EmitLoweredAtomicFP(MachineInstr *MI,
// movss %xmm, (%gpr)
// Or sd equivalent for 64-bit operations.
unsigned MOp, FOp;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("unexpected instr type for EmitLoweredAtomicFP");
- case X86::RELEASE_FADD32mr: MOp = X86::MOVSSmr; FOp = X86::ADDSSrm; break;
- case X86::RELEASE_FADD64mr: MOp = X86::MOVSDmr; FOp = X86::ADDSDrm; break;
+ case X86::RELEASE_FADD32mr:
+ FOp = X86::ADDSSrm;
+ MOp = X86::MOVSSmr;
+ break;
+ case X86::RELEASE_FADD64mr:
+ FOp = X86::ADDSDrm;
+ MOp = X86::MOVSDmr;
+ break;
}
- const X86InstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const X86InstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- MachineOperand MSrc = MI->getOperand(0);
- unsigned VSrc = MI->getOperand(5).getReg();
- const MachineOperand &Disp = MI->getOperand(3);
- MachineOperand ZeroDisp = MachineOperand::CreateImm(0);
- bool hasDisp = Disp.isGlobal() || Disp.isImm();
- if (hasDisp && MSrc.isReg())
- MSrc.setIsKill(false);
- MachineInstrBuilder MIM = BuildMI(*BB, MI, DL, TII->get(MOp))
- .addOperand(/*Base=*/MSrc)
- .addImm(/*Scale=*/1)
- .addReg(/*Index=*/0)
- .addDisp(hasDisp ? Disp : ZeroDisp, /*off=*/0)
- .addReg(0);
- MachineInstr *MIO = BuildMI(*BB, (MachineInstr *)MIM, DL, TII->get(FOp),
- MRI.createVirtualRegister(MRI.getRegClass(VSrc)))
- .addReg(VSrc)
- .addOperand(/*Base=*/MSrc)
- .addImm(/*Scale=*/1)
- .addReg(/*Index=*/0)
- .addDisp(hasDisp ? Disp : ZeroDisp, /*off=*/0)
- .addReg(/*Segment=*/0);
- MIM.addReg(MIO->getOperand(0).getReg(), RegState::Kill);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ unsigned ValOpIdx = X86::AddrNumOperands;
+ unsigned VSrc = MI.getOperand(ValOpIdx).getReg();
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, MI, DL, TII->get(FOp),
+ MRI.createVirtualRegister(MRI.getRegClass(VSrc)))
+ .addReg(VSrc);
+ for (int i = 0; i < X86::AddrNumOperands; ++i) {
+ MachineOperand &Operand = MI.getOperand(i);
+ // Clear any kill flags on register operands as we'll create a second
+ // instruction using the same address operands.
+ if (Operand.isReg())
+ Operand.setIsKill(false);
+ MIB.addOperand(Operand);
+ }
+ MachineInstr *FOpMI = MIB;
+ MIB = BuildMI(*BB, MI, DL, TII->get(MOp));
+ for (int i = 0; i < X86::AddrNumOperands; ++i)
+ MIB.addOperand(MI.getOperand(i));
+ MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill);
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
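// Editorial sketch (not from the patch): the source pattern this pseudo
// expansion targets, written with plain std::atomic. With these memory orders
// the load/add/store below can be emitted as ADDSS from memory plus MOVSS back,
// as described in the comment above; the exact codegen depends on the target.
#include <atomic>

void atomicFPAdd(std::atomic<float> &A, float X) {
  A.store(A.load(std::memory_order_acquire) + X, std::memory_order_release);
}

int main() {
  std::atomic<float> A{1.0f};
  atomicFPAdd(A, 2.5f);
  return A.load() == 3.5f ? 0 : 1;
}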
MachineBasicBlock *
-X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
+X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
assert(MF->shouldSplitStack());
- const bool Is64Bit = Subtarget->is64Bit();
- const bool IsLP64 = Subtarget->isTarget64BitLP64();
+ const bool Is64Bit = Subtarget.is64Bit();
+ const bool IsLP64 = Subtarget.isTarget64BitLP64();
const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
@@ -22077,11 +23499,12 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
getRegClassFor(getPointerTy(MF->getDataLayout()));
unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
- bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
- tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
- SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
- sizeVReg = MI->getOperand(1).getReg(),
- physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
+ bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
+ tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
+ SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
+ sizeVReg = MI.getOperand(1).getReg(),
+ physSPReg =
+ IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
MachineFunction::iterator MBBIter = ++BB->getIterator();
@@ -22113,7 +23536,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
// Calls into a routine in libgcc to allocate more space from the heap.
const uint32_t *RegMask =
- Subtarget->getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
+ Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
if (IsLP64) {
BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
.addReg(sizeVReg);
@@ -22156,43 +23579,33 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
// Take care of the PHI nodes.
BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
- MI->getOperand(0).getReg())
- .addReg(mallocPtrVReg).addMBB(mallocMBB)
- .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
+ MI.getOperand(0).getReg())
+ .addReg(mallocPtrVReg)
+ .addMBB(mallocMBB)
+ .addReg(bumpSPPtrVReg)
+ .addMBB(bumpMBB);
// Delete the original pseudo instruction.
- MI->eraseFromParent();
+ MI.eraseFromParent();
// And we're done.
return continueMBB;
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
- MachineBasicBlock *BB) const {
- assert(!Subtarget->isTargetMachO());
- DebugLoc DL = MI->getDebugLoc();
- MachineInstr *ResumeMI = Subtarget->getFrameLowering()->emitStackProbe(
- *BB->getParent(), *BB, MI, DL, false);
- MachineBasicBlock *ResumeBB = ResumeMI->getParent();
- MI->eraseFromParent(); // The pseudo instruction is gone now.
- return ResumeBB;
-}
-
-MachineBasicBlock *
-X86TargetLowering::EmitLoweredCatchRet(MachineInstr *MI,
+X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- MachineBasicBlock *TargetMBB = MI->getOperand(0).getMBB();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
+ DebugLoc DL = MI.getDebugLoc();
assert(!isAsynchronousEHPersonality(
classifyEHPersonality(MF->getFunction()->getPersonalityFn())) &&
"SEH does not use catchret!");
// Only 32-bit EH needs to worry about manually restoring stack pointers.
- if (!Subtarget->is32Bit())
+ if (!Subtarget.is32Bit())
return BB;
// C++ EH creates a new target block to hold the restore code, and wires up
@@ -22203,7 +23616,7 @@ X86TargetLowering::EmitLoweredCatchRet(MachineInstr *MI,
MF->insert(std::next(BB->getIterator()), RestoreMBB);
RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(RestoreMBB);
- MI->getOperand(0).setMBB(RestoreMBB);
+ MI.getOperand(0).setMBB(RestoreMBB);
auto RestoreMBBI = RestoreMBB->begin();
BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
@@ -22212,37 +23625,37 @@ X86TargetLowering::EmitLoweredCatchRet(MachineInstr *MI,
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredCatchPad(MachineInstr *MI,
+X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const Constant *PerFn = MF->getFunction()->getPersonalityFn();
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
// Only 32-bit SEH requires special handling for catchpad.
- if (IsSEH && Subtarget->is32Bit()) {
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ if (IsSEH && Subtarget.is32Bit()) {
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
}
- MI->eraseFromParent();
+ MI.eraseFromParent();
return BB;
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredTLSAddr(MachineInstr *MI,
+X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
MachineBasicBlock *BB) const {
// So, here we replace TLSADDR with the sequence:
// adjust_stackdown -> TLSADDR -> adjust_stackup.
// We need this because TLSADDR is lowered into calls
// inside MC, therefore without the two markers shrink-wrapping
  // may push the prologue/epilogue past them.
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
MachineFunction &MF = *BB->getParent();
// Emit CALLSEQ_START right before the instruction.
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
MachineInstrBuilder CallseqStart =
- BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0);
+ BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0);
BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
// Emit CALLSEQ_END right after the instruction.
@@ -22257,86 +23670,89 @@ X86TargetLowering::EmitLoweredTLSAddr(MachineInstr *MI,
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
+X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
MachineBasicBlock *BB) const {
// This is pretty easy. We're taking the value that we received from
// our load from the relocation, sticking it in either RDI (x86-64)
// or EAX and doing an indirect call. The return value will then
// be in the normal return register.
MachineFunction *F = BB->getParent();
- const X86InstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const X86InstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
- assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
- assert(MI->getOperand(3).isGlobal() && "This should be a global");
+ assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
+ assert(MI.getOperand(3).isGlobal() && "This should be a global");
// Get a register mask for the lowered call.
// FIXME: The 32-bit calls have non-standard calling conventions. Use a
// proper register mask.
const uint32_t *RegMask =
- Subtarget->is64Bit() ?
- Subtarget->getRegisterInfo()->getDarwinTLSCallPreservedMask() :
- Subtarget->getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
- if (Subtarget->is64Bit()) {
- MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
- TII->get(X86::MOV64rm), X86::RDI)
- .addReg(X86::RIP)
- .addImm(0).addReg(0)
- .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
- MI->getOperand(3).getTargetFlags())
- .addReg(0);
+ Subtarget.is64Bit() ?
+ Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
+ Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
+ if (Subtarget.is64Bit()) {
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
+ .addReg(X86::RIP)
+ .addImm(0)
+ .addReg(0)
+ .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
+ MI.getOperand(3).getTargetFlags())
+ .addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI);
MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
- } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
- MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
- TII->get(X86::MOV32rm), X86::EAX)
- .addReg(0)
- .addImm(0).addReg(0)
- .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
- MI->getOperand(3).getTargetFlags())
- .addReg(0);
+ } else if (!isPositionIndependent()) {
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
+ .addReg(0)
+ .addImm(0)
+ .addReg(0)
+ .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
+ MI.getOperand(3).getTargetFlags())
+ .addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
} else {
- MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
- TII->get(X86::MOV32rm), X86::EAX)
- .addReg(TII->getGlobalBaseReg(F))
- .addImm(0).addReg(0)
- .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
- MI->getOperand(3).getTargetFlags())
- .addReg(0);
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
+ .addReg(TII->getGlobalBaseReg(F))
+ .addImm(0)
+ .addReg(0)
+ .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
+ MI.getOperand(3).getTargetFlags())
+ .addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
}
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
MachineBasicBlock *
-X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
+X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+ MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
unsigned DstReg;
unsigned MemOpndSlot = 0;
unsigned CurOp = 0;
- DstReg = MI->getOperand(CurOp++).getReg();
+ DstReg = MI.getOperand(CurOp++).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
assert(RC->hasType(MVT::i32) && "Invalid destination!");
unsigned mainDstReg = MRI.createVirtualRegister(RC);
@@ -22384,16 +23800,15 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
unsigned PtrStoreOpc = 0;
unsigned LabelReg = 0;
const int64_t LabelOffset = 1 * PVT.getStoreSize();
- Reloc::Model RM = MF->getTarget().getRelocationModel();
bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
- (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
+ !isPositionIndependent();
// Prepare IP either in reg or imm.
if (!UseImmLabel) {
PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
LabelReg = MRI.createVirtualRegister(PtrRC);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
.addReg(X86::RIP)
.addImm(0)
@@ -22406,7 +23821,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
.addReg(XII->getGlobalBaseReg(MF))
.addImm(0)
.addReg(0)
- .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
+ .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
.addReg(0);
}
} else
@@ -22415,9 +23830,9 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
- MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
+ MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
else
- MIB.addOperand(MI->getOperand(MemOpndSlot + i));
+ MIB.addOperand(MI.getOperand(MemOpndSlot + i));
}
if (!UseImmLabel)
MIB.addReg(LabelReg);
@@ -22428,7 +23843,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
.addMBB(restoreMBB);
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
MIB.addRegMask(RegInfo->getNoPreservedMask());
thisMBB->addSuccessor(mainMBB);
thisMBB->addSuccessor(restoreMBB);
@@ -22447,7 +23862,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// restoreMBB:
if (RegInfo->hasBasePointer(*MF)) {
const bool Uses64BitFramePtr =
- Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
+ Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
X86FI->setRestoreBasePointer(MF);
unsigned FramePtr = RegInfo->getFrameRegister(*MF);
@@ -22461,21 +23876,21 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
restoreMBB->addSuccessor(sinkMBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return sinkMBB;
}
MachineBasicBlock *
-X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
+X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+ MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
@@ -22485,7 +23900,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
(PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
unsigned Tmp = MRI.createVirtualRegister(RC);
// Since FP is only updated here but NOT referenced, it's treated as GPR.
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
unsigned SP = RegInfo->getStackRegister();
@@ -22500,41 +23915,275 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
// Reload FP
MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
- MIB.addOperand(MI->getOperand(i));
+ MIB.addOperand(MI.getOperand(i));
MIB.setMemRefs(MMOBegin, MMOEnd);
// Reload IP
MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
- MIB.addDisp(MI->getOperand(i), LabelOffset);
+ MIB.addDisp(MI.getOperand(i), LabelOffset);
else
- MIB.addOperand(MI->getOperand(i));
+ MIB.addOperand(MI.getOperand(i));
}
MIB.setMemRefs(MMOBegin, MMOEnd);
// Reload SP
MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
- MIB.addDisp(MI->getOperand(i), SPOffset);
+ MIB.addDisp(MI.getOperand(i), SPOffset);
else
- MIB.addOperand(MI->getOperand(i));
+ MIB.addOperand(MI.getOperand(i));
}
MIB.setMemRefs(MMOBegin, MMOEnd);
// Jump
BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return MBB;
}
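
The longjmp expansion above reloads FP, IP, and SP from fixed pointer-sized slots of the buffer before the indirect jump. A small sketch of the offset arithmetic, assuming the SP slot immediately follows the IP slot (SPOffset itself is defined in elided context):

    #include <cassert>
    #include <cstdint>

    // LabelOffset is 1 * pointer size (shown above); the SP slot is assumed
    // to be the next one. FP lives at offset 0.
    constexpr int64_t labelOffset(int PtrBytes) { return 1 * PtrBytes; }
    constexpr int64_t spOffset(int PtrBytes) { return 2 * PtrBytes; }

    int main() {
      assert(labelOffset(8) == 8 && spOffset(8) == 16); // x86-64 (i64 pointers)
      assert(labelOffset(4) == 4 && spOffset(4) == 8);  // i386 (i32 pointers)
    }
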
+void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
+ MachineBasicBlock *MBB,
+ MachineBasicBlock *DispatchBB,
+ int FI) const {
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo *MRI = &MF->getRegInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+
+ MVT PVT = getPointerTy(MF->getDataLayout());
+ assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
+
+ unsigned Op = 0;
+ unsigned VR = 0;
+
+ bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
+ !isPositionIndependent();
+
+ if (UseImmLabel) {
+ Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
+ } else {
+ const TargetRegisterClass *TRC =
+ (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
+ VR = MRI->createVirtualRegister(TRC);
+ Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
+
+ /* const X86InstrInfo *XII = static_cast<const X86InstrInfo *>(TII); */
+
+ if (Subtarget.is64Bit())
+ BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
+ .addReg(X86::RIP)
+ .addImm(1)
+ .addReg(0)
+ .addMBB(DispatchBB)
+ .addReg(0);
+ else
+ BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
+ .addReg(0) /* XII->getGlobalBaseReg(MF) */
+ .addImm(1)
+ .addReg(0)
+ .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
+ .addReg(0);
+ }
+
+ MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
+ addFrameReference(MIB, FI, 36);
+ if (UseImmLabel)
+ MIB.addMBB(DispatchBB);
+ else
+ MIB.addReg(VR);
+}
+
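
The UseImmLabel predicate above gates whether the dispatch block's address can be stored directly as an immediate (MOV*mi) or must first be materialized with LEA into a virtual register. A standalone restatement of that decision:

    #include <cassert>

    enum class CodeModel { Small, Kernel, Medium, Large };

    // Mirrors the predicate above: a MOV*mi store of the block address is
    // only usable with the small code model and no position independence;
    // otherwise the address goes through an LEA into a register first.
    bool useImmLabel(CodeModel CM, bool PositionIndependent) {
      return CM == CodeModel::Small && !PositionIndependent;
    }

    int main() {
      assert(useImmLabel(CodeModel::Small, false));
      assert(!useImmLabel(CodeModel::Small, true));
      assert(!useImmLabel(CodeModel::Large, false));
    }
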
+MachineBasicBlock *
+X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = BB->getParent();
+ MachineModuleInfo *MMI = &MF->getMMI();
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ MachineRegisterInfo *MRI = &MF->getRegInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ int FI = MFI->getFunctionContextIndex();
+
+ // Get a mapping of the call site numbers to all of the landing pads they're
+ // associated with.
+ DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
+ unsigned MaxCSNum = 0;
+ for (auto &MBB : *MF) {
+ if (!MBB.isEHPad())
+ continue;
+
+ MCSymbol *Sym = nullptr;
+ for (const auto &MI : MBB) {
+ if (MI.isDebugValue())
+ continue;
+
+ assert(MI.isEHLabel() && "expected EH_LABEL");
+ Sym = MI.getOperand(0).getMCSymbol();
+ break;
+ }
+
+ if (!MMI->hasCallSiteLandingPad(Sym))
+ continue;
+
+ for (unsigned CSI : MMI->getCallSiteLandingPad(Sym)) {
+ CallSiteNumToLPad[CSI].push_back(&MBB);
+ MaxCSNum = std::max(MaxCSNum, CSI);
+ }
+ }
+
+ // Get an ordered list of the machine basic blocks for the jump table.
+ std::vector<MachineBasicBlock *> LPadList;
+ SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
+ LPadList.reserve(CallSiteNumToLPad.size());
+
+ for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
+ for (auto &LP : CallSiteNumToLPad[CSI]) {
+ LPadList.push_back(LP);
+ InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
+ }
+ }
+
+ assert(!LPadList.empty() &&
+ "No landing pad destinations for the dispatch jump table!");
+
+ // Create the MBBs for the dispatch code.
+
+ // Shove the dispatch's address into the return slot in the function context.
+ MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
+ DispatchBB->setIsEHPad(true);
+
+ MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
+ BuildMI(TrapBB, DL, TII->get(X86::TRAP));
+ DispatchBB->addSuccessor(TrapBB);
+
+ MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
+ DispatchBB->addSuccessor(DispContBB);
+
+ // Insert MBBs.
+ MF->push_back(DispatchBB);
+ MF->push_back(DispContBB);
+ MF->push_back(TrapBB);
+
+ // Insert code into the entry block that creates and registers the function
+ // context.
+ SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
+
+  // Create the jump table and associated information.
+ MachineJumpTableInfo *JTI =
+ MF->getOrCreateJumpTableInfo(getJumpTableEncoding());
+ unsigned MJTI = JTI->createJumpTableIndex(LPadList);
+
+ const X86InstrInfo *XII = static_cast<const X86InstrInfo *>(TII);
+ const X86RegisterInfo &RI = XII->getRegisterInfo();
+
+ // Add a register mask with no preserved registers. This results in all
+ // registers being marked as clobbered.
+ if (RI.hasBasePointer(*MF)) {
+ const bool FPIs64Bit =
+ Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
+ X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
+ MFI->setRestoreBasePointer(MF);
+
+ unsigned FP = RI.getFrameRegister(*MF);
+ unsigned BP = RI.getBaseRegister();
+ unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
+ addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
+ MFI->getRestoreBasePointerOffset())
+ .addRegMask(RI.getNoPreservedMask());
+ } else {
+ BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
+ .addRegMask(RI.getNoPreservedMask());
+ }
+
+ unsigned IReg = MRI->createVirtualRegister(&X86::GR32RegClass);
+ addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
+ 4);
+ BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
+ .addReg(IReg)
+ .addImm(LPadList.size());
+ BuildMI(DispatchBB, DL, TII->get(X86::JA_1)).addMBB(TrapBB);
+
+ unsigned JReg = MRI->createVirtualRegister(&X86::GR32RegClass);
+ BuildMI(DispContBB, DL, TII->get(X86::SUB32ri), JReg)
+ .addReg(IReg)
+ .addImm(1);
+ BuildMI(DispContBB, DL,
+ TII->get(Subtarget.is64Bit() ? X86::JMP64m : X86::JMP32m))
+ .addReg(0)
+ .addImm(Subtarget.is64Bit() ? 8 : 4)
+ .addReg(JReg)
+ .addJumpTableIndex(MJTI)
+ .addReg(0);
+
+ // Add the jump table entries as successors to the MBB.
+ SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
+ for (auto &LP : LPadList)
+ if (SeenMBBs.insert(LP).second)
+ DispContBB->addSuccessor(LP);
+
+ // N.B. the order the invoke BBs are processed in doesn't matter here.
+ SmallVector<MachineBasicBlock *, 64> MBBLPads;
+ const MCPhysReg *SavedRegs =
+ Subtarget.getRegisterInfo()->getCalleeSavedRegs(MF);
+ for (MachineBasicBlock *MBB : InvokeBBs) {
+ // Remove the landing pad successor from the invoke block and replace it
+ // with the new dispatch block.
+ // Keep a copy of Successors since it's modified inside the loop.
+ SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
+ MBB->succ_rend());
+ // FIXME: Avoid quadratic complexity.
+ for (auto MBBS : Successors) {
+ if (MBBS->isEHPad()) {
+ MBB->removeSuccessor(MBBS);
+ MBBLPads.push_back(MBBS);
+ }
+ }
+
+ MBB->addSuccessor(DispatchBB);
+
+ // Find the invoke call and mark all of the callee-saved registers as
+    // 'implicit defined' so that they're spilled. This prevents instructions
+    // from being moved above the EH block, where they would never be
+    // executed.
+ for (auto &II : reverse(*MBB)) {
+ if (!II.isCall())
+ continue;
+
+ DenseMap<unsigned, bool> DefRegs;
+ for (auto &MOp : II.operands())
+ if (MOp.isReg())
+ DefRegs[MOp.getReg()] = true;
+
+ MachineInstrBuilder MIB(*MF, &II);
+ for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
+ unsigned Reg = SavedRegs[RI];
+ if (!DefRegs[Reg])
+ MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
+ }
+
+ break;
+ }
+ }
+
+ // Mark all former landing pads as non-landing pads. The dispatch is the only
+ // landing pad now.
+ for (auto &LP : MBBLPads)
+ LP->setIsEHPad(false);
+
+ // The instruction is gone now.
+ MI.eraseFromParent();
+ return BB;
+}
+
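
The dispatch block built above loads the 1-based call-site index from the function context, traps when it exceeds the landing-pad count (CMP32ri + JA_1), and otherwise rebases the index (SUB32ri) and branches through the jump table (JMP64m/JMP32m). A behavioral model, assuming 1-based indices as the emitted compare implies:

    #include <cassert>
    #include <vector>

    // Standalone model of the emitted dispatch. Returns -1 to stand in for
    // the trap path; valid indices select a landing pad via the jump table.
    int dispatch(unsigned CallSiteIndex, const std::vector<int> &LPads) {
      assert(CallSiteIndex >= 1 && "call-site indices are assumed 1-based");
      if (CallSiteIndex > LPads.size())
        return -1;                       // TrapBB
      return LPads[CallSiteIndex - 1];   // jump-table indexed branch
    }

    int main() {
      std::vector<int> LPads = {10, 20, 30};
      assert(dispatch(2, LPads) == 20);
      assert(dispatch(4, LPads) == -1);  // out of range -> trap
    }
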
// Replace 213-type (isel default) FMA3 instructions with 231-type for
// accumulator loops. Writing back to the accumulator allows the coalescer
// to remove extra copies in the loop.
// FIXME: Do this on AVX512. We don't support 231 variants yet (PR23937).
MachineBasicBlock *
-X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
+X86TargetLowering::emitFMA3Instr(MachineInstr &MI,
MachineBasicBlock *MBB) const {
- MachineOperand &AddendOp = MI->getOperand(3);
+ MachineOperand &AddendOp = MI.getOperand(3);
// Bail out early if the addend isn't a register - we can't switch these.
if (!AddendOp.isReg())
@@ -22565,55 +24214,120 @@ X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
assert(AddendDef.getOperand(i).isReg());
MachineOperand PHISrcOp = AddendDef.getOperand(i);
MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
- if (&PHISrcInst == MI) {
+ if (&PHISrcInst == &MI) {
// Found a matching instruction.
unsigned NewFMAOpc = 0;
- switch (MI->getOpcode()) {
- case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
- case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
- case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
- case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
- case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
- case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
- case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
- case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
- case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
- case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
- case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
- case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
- case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
- case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
- case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
- case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
- case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
- case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
- case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
- case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
-
- case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
- case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
- case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
- case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
- case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
- case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
- case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
- case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
- case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
- case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
- case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
- case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
- default: llvm_unreachable("Unrecognized FMA variant.");
+ switch (MI.getOpcode()) {
+ case X86::VFMADDPDr213r:
+ NewFMAOpc = X86::VFMADDPDr231r;
+ break;
+ case X86::VFMADDPSr213r:
+ NewFMAOpc = X86::VFMADDPSr231r;
+ break;
+ case X86::VFMADDSDr213r:
+ NewFMAOpc = X86::VFMADDSDr231r;
+ break;
+ case X86::VFMADDSSr213r:
+ NewFMAOpc = X86::VFMADDSSr231r;
+ break;
+ case X86::VFMSUBPDr213r:
+ NewFMAOpc = X86::VFMSUBPDr231r;
+ break;
+ case X86::VFMSUBPSr213r:
+ NewFMAOpc = X86::VFMSUBPSr231r;
+ break;
+ case X86::VFMSUBSDr213r:
+ NewFMAOpc = X86::VFMSUBSDr231r;
+ break;
+ case X86::VFMSUBSSr213r:
+ NewFMAOpc = X86::VFMSUBSSr231r;
+ break;
+ case X86::VFNMADDPDr213r:
+ NewFMAOpc = X86::VFNMADDPDr231r;
+ break;
+ case X86::VFNMADDPSr213r:
+ NewFMAOpc = X86::VFNMADDPSr231r;
+ break;
+ case X86::VFNMADDSDr213r:
+ NewFMAOpc = X86::VFNMADDSDr231r;
+ break;
+ case X86::VFNMADDSSr213r:
+ NewFMAOpc = X86::VFNMADDSSr231r;
+ break;
+ case X86::VFNMSUBPDr213r:
+ NewFMAOpc = X86::VFNMSUBPDr231r;
+ break;
+ case X86::VFNMSUBPSr213r:
+ NewFMAOpc = X86::VFNMSUBPSr231r;
+ break;
+ case X86::VFNMSUBSDr213r:
+ NewFMAOpc = X86::VFNMSUBSDr231r;
+ break;
+ case X86::VFNMSUBSSr213r:
+ NewFMAOpc = X86::VFNMSUBSSr231r;
+ break;
+ case X86::VFMADDSUBPDr213r:
+ NewFMAOpc = X86::VFMADDSUBPDr231r;
+ break;
+ case X86::VFMADDSUBPSr213r:
+ NewFMAOpc = X86::VFMADDSUBPSr231r;
+ break;
+ case X86::VFMSUBADDPDr213r:
+ NewFMAOpc = X86::VFMSUBADDPDr231r;
+ break;
+ case X86::VFMSUBADDPSr213r:
+ NewFMAOpc = X86::VFMSUBADDPSr231r;
+ break;
+
+ case X86::VFMADDPDr213rY:
+ NewFMAOpc = X86::VFMADDPDr231rY;
+ break;
+ case X86::VFMADDPSr213rY:
+ NewFMAOpc = X86::VFMADDPSr231rY;
+ break;
+ case X86::VFMSUBPDr213rY:
+ NewFMAOpc = X86::VFMSUBPDr231rY;
+ break;
+ case X86::VFMSUBPSr213rY:
+ NewFMAOpc = X86::VFMSUBPSr231rY;
+ break;
+ case X86::VFNMADDPDr213rY:
+ NewFMAOpc = X86::VFNMADDPDr231rY;
+ break;
+ case X86::VFNMADDPSr213rY:
+ NewFMAOpc = X86::VFNMADDPSr231rY;
+ break;
+ case X86::VFNMSUBPDr213rY:
+ NewFMAOpc = X86::VFNMSUBPDr231rY;
+ break;
+ case X86::VFNMSUBPSr213rY:
+ NewFMAOpc = X86::VFNMSUBPSr231rY;
+ break;
+ case X86::VFMADDSUBPDr213rY:
+ NewFMAOpc = X86::VFMADDSUBPDr231rY;
+ break;
+ case X86::VFMADDSUBPSr213rY:
+ NewFMAOpc = X86::VFMADDSUBPSr231rY;
+ break;
+ case X86::VFMSUBADDPDr213rY:
+ NewFMAOpc = X86::VFMSUBADDPDr231rY;
+ break;
+ case X86::VFMSUBADDPSr213rY:
+ NewFMAOpc = X86::VFMSUBADDPSr231rY;
+ break;
+ default:
+ llvm_unreachable("Unrecognized FMA variant.");
}
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
MachineInstrBuilder MIB =
- BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(3))
- .addOperand(MI->getOperand(2))
- .addOperand(MI->getOperand(1));
+ BuildMI(MF, MI.getDebugLoc(), TII.get(NewFMAOpc))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(3))
+ .addOperand(MI.getOperand(2))
+ .addOperand(MI.getOperand(1));
MBB->insert(MachineBasicBlock::iterator(MI), MIB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
}
}
@@ -22621,9 +24335,9 @@ X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
}
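
The 213-to-231 rewrite is sound because both forms compute the same product and differ only in which source slot holds the addend; with the accumulator moved into src1 (the operand tied to the destination), the loop writes back into the accumulator register and the copy can be coalesced away. A scalar model of the two encodings:

    #include <cassert>

    // 213 form: dst = src2 * src1 + src3
    // 231 form: dst = src2 * src3 + src1
    double fma213(double src1, double src2, double src3) {
      return src2 * src1 + src3;
    }
    double fma231(double src1, double src2, double src3) {
      return src2 * src3 + src1;
    }

    int main() {
      // Matches the operand swap above: (0),(3),(2),(1) puts the addend in
      // the tied slot.
      assert(fma213(2.0, 3.0, 4.0) == fma231(4.0, 3.0, 2.0)); // both 10.0
    }
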
MachineBasicBlock *
-X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unexpected instr type to insert");
case X86::TAILJMPd64:
case X86::TAILJMPr64:
@@ -22641,8 +24355,6 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
return EmitLoweredTLSAddr(MI, BB);
- case X86::WIN_ALLOCA:
- return EmitLoweredWinAlloca(MI, BB);
case X86::CATCHRET:
return EmitLoweredCatchRet(MI, BB);
case X86::CATCHPAD:
@@ -22679,31 +24391,35 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::RDFLAGS32:
case X86::RDFLAGS64: {
- DebugLoc DL = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
unsigned PushF =
- MI->getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
- unsigned Pop =
- MI->getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
- BuildMI(*BB, MI, DL, TII->get(PushF));
- BuildMI(*BB, MI, DL, TII->get(Pop), MI->getOperand(0).getReg());
-
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
+ unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
+ MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
+ // Permit reads of the FLAGS register without it being defined.
+ // This intrinsic exists to read external processor state in flags, such as
+ // the trap flag, interrupt flag, and direction flag, none of which are
+ // modeled by the backend.
+ Push->getOperand(2).setIsUndef();
+ BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
+
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
case X86::WRFLAGS32:
case X86::WRFLAGS64: {
- DebugLoc DL = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
unsigned Push =
- MI->getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
+ MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
unsigned PopF =
- MI->getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
- BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI->getOperand(0).getReg());
+ MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
+ BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
BuildMI(*BB, MI, DL, TII->get(PopF));
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
@@ -22721,8 +24437,8 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::FP80_TO_INT32_IN_MEM:
case X86::FP80_TO_INT64_IN_MEM: {
MachineFunction *F = BB->getParent();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
@@ -22750,7 +24466,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Get the X86 opcode to use.
unsigned Opc;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("illegal opcode!");
case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
@@ -22763,35 +24479,15 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
}
- X86AddressMode AM;
- MachineOperand &Op = MI->getOperand(0);
- if (Op.isReg()) {
- AM.BaseType = X86AddressMode::RegBase;
- AM.Base.Reg = Op.getReg();
- } else {
- AM.BaseType = X86AddressMode::FrameIndexBase;
- AM.Base.FrameIndex = Op.getIndex();
- }
- Op = MI->getOperand(1);
- if (Op.isImm())
- AM.Scale = Op.getImm();
- Op = MI->getOperand(2);
- if (Op.isImm())
- AM.IndexReg = Op.getImm();
- Op = MI->getOperand(3);
- if (Op.isGlobal()) {
- AM.GV = Op.getGlobal();
- } else {
- AM.Disp = Op.getImm();
- }
+ X86AddressMode AM = getAddressFromInstr(&MI, 0);
addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
- .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
+ .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
// Reload the original control word now.
addFrameReference(BuildMI(*BB, MI, DL,
TII->get(X86::FLDCW16m)), CWFrameIdx);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
// String/text processing lowering.
@@ -22803,9 +24499,9 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VPCMPESTRM128REG:
case X86::PCMPESTRM128MEM:
case X86::VPCMPESTRM128MEM:
- assert(Subtarget->hasSSE42() &&
+ assert(Subtarget.hasSSE42() &&
"Target must have SSE4.2 or AVX features enabled");
- return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
+ return emitPCMPSTRM(MI, BB, Subtarget.getInstrInfo());
// String/text processing lowering.
case X86::PCMPISTRIREG:
@@ -22816,21 +24512,23 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VPCMPESTRIREG:
case X86::PCMPESTRIMEM:
case X86::VPCMPESTRIMEM:
- assert(Subtarget->hasSSE42() &&
+ assert(Subtarget.hasSSE42() &&
"Target must have SSE4.2 or AVX features enabled");
- return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
+ return emitPCMPSTRI(MI, BB, Subtarget.getInstrInfo());
// Thread synchronization.
case X86::MONITOR:
- return EmitMonitor(MI, BB, Subtarget);
+ return emitMonitor(MI, BB, Subtarget, X86::MONITORrrr);
+ case X86::MONITORX:
+ return emitMonitor(MI, BB, Subtarget, X86::MONITORXrrr);
// PKU feature
case X86::WRPKRU:
- return EmitWRPKRU(MI, BB, Subtarget);
+ return emitWRPKRU(MI, BB, Subtarget);
case X86::RDPKRU:
- return EmitRDPKRU(MI, BB, Subtarget);
+ return emitRDPKRU(MI, BB, Subtarget);
// xbegin
case X86::XBEGIN:
- return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
+ return emitXBegin(MI, BB, Subtarget.getInstrInfo());
case X86::VASTART_SAVE_XMM_REGS:
return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
@@ -22846,6 +24544,9 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::EH_SjLj_LongJmp64:
return emitEHSjLjLongJmp(MI, BB);
+ case X86::Int_eh_sjlj_setup_dispatch:
+ return EmitSjLjDispatchBlock(MI, BB);
+
case TargetOpcode::STATEPOINT:
// As an implementation detail, STATEPOINT shares the STACKMAP format at
// this point in the process. We diverge later.
@@ -22888,6 +24589,14 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VFMSUBADDPDr213rY:
case X86::VFMSUBADDPSr213rY:
return emitFMA3Instr(MI, BB);
+ case X86::LCMPXCHG8B_SAVE_EBX:
+ case X86::LCMPXCHG16B_SAVE_RBX: {
+ unsigned BasePtr =
+ MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
+ if (!BB->isLiveIn(BasePtr))
+ BB->addLiveIn(BasePtr);
+ return BB;
+ }
}
}
@@ -22930,33 +24639,9 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case X86ISD::SETCC:
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
break;
- case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- unsigned NumLoBits = 0;
- switch (IntId) {
- default: break;
- case Intrinsic::x86_sse_movmsk_ps:
- case Intrinsic::x86_avx_movmsk_ps_256:
- case Intrinsic::x86_sse2_movmsk_pd:
- case Intrinsic::x86_avx_movmsk_pd_256:
- case Intrinsic::x86_mmx_pmovmskb:
- case Intrinsic::x86_sse2_pmovmskb_128:
- case Intrinsic::x86_avx2_pmovmskb: {
- // High bits of movmskp{s|d}, pmovmskb are known zero.
- switch (IntId) {
- default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
- case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
- case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
- case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
- case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
- case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
- case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
- case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
- }
- KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
- break;
- }
- }
+ case X86ISD::MOVMSK: {
+ unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
+ KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
break;
}
}
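
MOVMSK packs one sign bit per source element into the low bits of a 32-bit result, so every bit above the element count is known zero; the new case computes exactly that. A plain-integer model of the APInt::getHighBitsSet call:

    #include <cassert>
    #include <cstdint>

    // For an N-element MOVMSK source, the high (32 - N) result bits are
    // known zero.
    uint32_t movmskKnownZero(unsigned NumElts) {
      assert(NumElts > 0 && NumElts < 32 && "model covers the i32 result");
      return ~((1u << NumElts) - 1);
    }

    int main() {
      assert(movmskKnownZero(4) == 0xFFFFFFF0u);  // v4f32 / v4i32 sources
      assert(movmskKnownZero(16) == 0xFFFF0000u); // v16i8 source
    }
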
@@ -22974,8 +24659,8 @@ unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
return 1;
}
-/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
-/// node is a GlobalAddress + offset.
+/// Returns true (and the GlobalValue and the offset) if the node is a
+/// GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
const GlobalValue* &GA,
int64_t &Offset) const {
@@ -22989,11 +24674,11 @@ bool X86TargetLowering::isGAPlusOffset(SDNode *N,
return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
-/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
+/// Performs shuffle combines for 256-bit vectors.
/// FIXME: This could be expanded to support 512 bit vectors as well.
-static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget* Subtarget) {
+static SDValue combineShuffle256(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc dl(N);
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
SDValue V1 = SVOp->getOperand(0);
@@ -23014,8 +24699,7 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
// RESULT: V + zero extended
//
if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
- V2.getOperand(1).getOpcode() != ISD::UNDEF ||
- V1.getOperand(1).getOpcode() != ISD::UNDEF)
+ !V2.getOperand(1).isUndef() || !V1.getOperand(1).isUndef())
return SDValue();
if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
@@ -23060,195 +24744,556 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
// Emit a zeroed vector and insert the desired subvector on its
// first half.
SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
- SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
+ SDValue InsV = insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
return DCI.CombineTo(N, InsV);
}
return SDValue();
}
+// Attempt to match a combined shuffle mask against supported unary shuffle
+// instructions.
+// TODO: Investigate sharing more of this with shuffle lowering.
+static bool matchUnaryVectorShuffle(MVT SrcVT, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget,
+ unsigned &Shuffle, MVT &ShuffleVT) {
+ bool FloatDomain = SrcVT.isFloatingPoint() ||
+ (!Subtarget.hasAVX2() && SrcVT.is256BitVector());
+
+ // Match a 128-bit integer vector against a VZEXT_MOVL (MOVQ) instruction.
+ if (!FloatDomain && SrcVT.is128BitVector() &&
+ isTargetShuffleEquivalent(Mask, {0, SM_SentinelZero})) {
+ Shuffle = X86ISD::VZEXT_MOVL;
+ ShuffleVT = MVT::v2i64;
+ return true;
+ }
+
+  // Check if we have SSE3, which will let us use MOVDDUP etc. These
+  // instructions are no slower than UNPCKLPD but have the option to
+  // fold the input operand into even an unaligned memory load.
+ if (SrcVT.is128BitVector() && Subtarget.hasSSE3() && FloatDomain) {
+ if (isTargetShuffleEquivalent(Mask, {0, 0})) {
+ Shuffle = X86ISD::MOVDDUP;
+ ShuffleVT = MVT::v2f64;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
+ Shuffle = X86ISD::MOVSLDUP;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
+ Shuffle = X86ISD::MOVSHDUP;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ }
+
+ if (SrcVT.is256BitVector() && FloatDomain) {
+ assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
+ Shuffle = X86ISD::MOVDDUP;
+ ShuffleVT = MVT::v4f64;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
+ Shuffle = X86ISD::MOVSLDUP;
+ ShuffleVT = MVT::v8f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
+ Shuffle = X86ISD::MOVSHDUP;
+ ShuffleVT = MVT::v8f32;
+ return true;
+ }
+ }
+
+ if (SrcVT.is512BitVector() && FloatDomain) {
+ assert(Subtarget.hasAVX512() &&
+ "AVX512 required for 512-bit vector shuffles");
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
+ Shuffle = X86ISD::MOVDDUP;
+ ShuffleVT = MVT::v8f64;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(
+ Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
+ Shuffle = X86ISD::MOVSLDUP;
+ ShuffleVT = MVT::v16f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(
+ Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
+ Shuffle = X86ISD::MOVSHDUP;
+ ShuffleVT = MVT::v16f32;
+ return true;
+ }
+ }
+
+ // Attempt to match against broadcast-from-vector.
+ if (Subtarget.hasAVX2()) {
+ unsigned NumElts = Mask.size();
+ SmallVector<int, 64> BroadcastMask(NumElts, 0);
+ if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
+ unsigned EltSize = SrcVT.getSizeInBits() / NumElts;
+ ShuffleVT = FloatDomain ? MVT::getFloatingPointVT(EltSize)
+ : MVT::getIntegerVT(EltSize);
+ ShuffleVT = MVT::getVectorVT(ShuffleVT, NumElts);
+ Shuffle = X86ISD::VBROADCAST;
+ return true;
+ }
+ }
+
+ return false;
+}
+
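
These matchers lean on isTargetShuffleEquivalent, which treats undef lanes in the candidate mask as wildcards while requiring every defined lane to agree. A hedged standalone version of that test:

    #include <cassert>
    #include <vector>

    // Sketch of the equivalence test assumed above: -1 mirrors
    // SM_SentinelUndef and may match anything; other lanes must be equal.
    constexpr int SM_SentinelUndef = -1;

    bool shuffleEquivalent(const std::vector<int> &Mask,
                           const std::vector<int> &Expected) {
      if (Mask.size() != Expected.size())
        return false;
      for (size_t i = 0; i != Mask.size(); ++i)
        if (Mask[i] != SM_SentinelUndef && Mask[i] != Expected[i])
          return false;
      return true;
    }

    int main() {
      assert(shuffleEquivalent({0, SM_SentinelUndef}, {0, 0})); // MOVDDUP {0,0}
      assert(!shuffleEquivalent({1, 0}, {0, 0}));
    }
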
+// Attempt to match a combined shuffle mask against supported unary immediate
+// permute instructions.
+// TODO: Investigate sharing more of this with shuffle lowering.
+static bool matchPermuteVectorShuffle(MVT SrcVT, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget,
+ unsigned &Shuffle, MVT &ShuffleVT,
+ unsigned &PermuteImm) {
+ // Ensure we don't contain any zero elements.
+ for (int M : Mask) {
+ if (M == SM_SentinelZero)
+ return false;
+ assert(SM_SentinelUndef <= M && M < (int)Mask.size() &&
+ "Expected unary shuffle");
+ }
+
+ unsigned MaskScalarSizeInBits = SrcVT.getSizeInBits() / Mask.size();
+ MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
+
+ // Handle PSHUFLW/PSHUFHW repeated patterns.
+ if (MaskScalarSizeInBits == 16) {
+ SmallVector<int, 4> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
+ ArrayRef<int> LoMask(Mask.data() + 0, 4);
+ ArrayRef<int> HiMask(Mask.data() + 4, 4);
+
+ // PSHUFLW: permute lower 4 elements only.
+ if (isUndefOrInRange(LoMask, 0, 4) &&
+ isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
+ Shuffle = X86ISD::PSHUFLW;
+ ShuffleVT = MVT::getVectorVT(MVT::i16, SrcVT.getSizeInBits() / 16);
+ PermuteImm = getV4X86ShuffleImm(LoMask);
+ return true;
+ }
+
+ // PSHUFHW: permute upper 4 elements only.
+ if (isUndefOrInRange(HiMask, 4, 8) &&
+ isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
+ // Offset the HiMask so that we can create the shuffle immediate.
+ int OffsetHiMask[4];
+ for (int i = 0; i != 4; ++i)
+ OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
+
+ Shuffle = X86ISD::PSHUFHW;
+ ShuffleVT = MVT::getVectorVT(MVT::i16, SrcVT.getSizeInBits() / 16);
+ PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
+ return true;
+ }
+
+ return false;
+ }
+ return false;
+ }
+
+ // We only support permutation of 32/64 bit elements after this.
+ if (MaskScalarSizeInBits != 32 && MaskScalarSizeInBits != 64)
+ return false;
+
+ // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
+ // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
+ bool FloatDomain = SrcVT.isFloatingPoint();
+ if (FloatDomain && !Subtarget.hasAVX())
+ return false;
+
+ // Pre-AVX2 we must use float shuffles on 256-bit vectors.
+ if (SrcVT.is256BitVector() && !Subtarget.hasAVX2())
+ FloatDomain = true;
+
+ // Check for lane crossing permutes.
+ if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
+ // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
+ if (Subtarget.hasAVX2() && SrcVT.is256BitVector() && Mask.size() == 4) {
+ Shuffle = X86ISD::VPERMI;
+ ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
+ PermuteImm = getV4X86ShuffleImm(Mask);
+ return true;
+ }
+ if (Subtarget.hasAVX512() && SrcVT.is512BitVector() && Mask.size() == 8) {
+ SmallVector<int, 4> RepeatedMask;
+ if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
+ Shuffle = X86ISD::VPERMI;
+ ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
+ PermuteImm = getV4X86ShuffleImm(RepeatedMask);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // VPERMILPD can permute with a non-repeating shuffle.
+ if (FloatDomain && MaskScalarSizeInBits == 64) {
+ Shuffle = X86ISD::VPERMILPI;
+ ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
+ PermuteImm = 0;
+ for (int i = 0, e = Mask.size(); i != e; ++i) {
+ int M = Mask[i];
+ if (M == SM_SentinelUndef)
+ continue;
+ assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
+ PermuteImm |= (M & 1) << i;
+ }
+ return true;
+ }
+
+ // We need a repeating shuffle mask for VPERMILPS/PSHUFD.
+ SmallVector<int, 4> RepeatedMask;
+ if (!is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask))
+ return false;
+
+ // Narrow the repeated mask for 32-bit element permutes.
+ SmallVector<int, 4> WordMask = RepeatedMask;
+ if (MaskScalarSizeInBits == 64)
+ scaleShuffleMask(2, RepeatedMask, WordMask);
+
+ Shuffle = (FloatDomain ? X86ISD::VPERMILPI : X86ISD::PSHUFD);
+ ShuffleVT = (FloatDomain ? MVT::f32 : MVT::i32);
+ ShuffleVT = MVT::getVectorVT(ShuffleVT, SrcVT.getSizeInBits() / 32);
+ PermuteImm = getV4X86ShuffleImm(WordMask);
+ return true;
+}
+
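
getV4X86ShuffleImm, used throughout the matcher above, packs a 4-lane mask into the 8-bit immediate consumed by PSHUFD/PSHUFLW/PSHUFHW/VPERMILPI: two bits per lane, lane 0 in the low bits. A sketch, assuming undef lanes default to 0:

    #include <cassert>

    // Two bits per lane, lane 0 lowest; negative (undef) lanes become 0.
    unsigned v4ShuffleImm(const int Mask[4]) {
      unsigned Imm = 0;
      for (int i = 0; i != 4; ++i)
        Imm |= (Mask[i] < 0 ? 0u : unsigned(Mask[i] & 3)) << (2 * i);
      return Imm;
    }

    int main() {
      const int Repeat[4] = {0, 0, 2, 2};   // MOVSLDUP-style repeated pattern
      assert(v4ShuffleImm(Repeat) == 0xA0); // binary 10'10'00'00
    }
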
+// Attempt to match a combined unary shuffle mask against supported binary
+// shuffle instructions.
+// TODO: Investigate sharing more of this with shuffle lowering.
+static bool matchBinaryVectorShuffle(MVT SrcVT, ArrayRef<int> Mask,
+ unsigned &Shuffle, MVT &ShuffleVT) {
+ bool FloatDomain = SrcVT.isFloatingPoint();
+
+ if (SrcVT.is128BitVector()) {
+ if (isTargetShuffleEquivalent(Mask, {0, 0}) && FloatDomain) {
+ Shuffle = X86ISD::MOVLHPS;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {1, 1}) && FloatDomain) {
+ Shuffle = X86ISD::MOVHLPS;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 1, 1}) && FloatDomain) {
+ Shuffle = X86ISD::UNPCKL;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {2, 2, 3, 3}) && FloatDomain) {
+ Shuffle = X86ISD::UNPCKH;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 1, 1, 2, 2, 3, 3}) ||
+ isTargetShuffleEquivalent(
+ Mask, {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7})) {
+ Shuffle = X86ISD::UNPCKL;
+ ShuffleVT = Mask.size() == 8 ? MVT::v8i16 : MVT::v16i8;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {4, 4, 5, 5, 6, 6, 7, 7}) ||
+ isTargetShuffleEquivalent(Mask, {8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13,
+ 13, 14, 14, 15, 15})) {
+ Shuffle = X86ISD::UNPCKH;
+ ShuffleVT = Mask.size() == 8 ? MVT::v8i16 : MVT::v16i8;
+ return true;
+ }
+ }
+
+ return false;
+}
+
/// \brief Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
-/// This is the leaf of the recursive combinine below. When we have found some
+/// This is the leaf of the recursive combine below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
-static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
- int Depth, bool HasPSHUFB, SelectionDAG &DAG,
+static bool combineX86ShuffleChain(SDValue Input, SDValue Root,
+ ArrayRef<int> BaseMask, int Depth,
+ bool HasVariableMask, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
- assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
+ const X86Subtarget &Subtarget) {
+ assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
// Find the operand that enters the chain. Note that multiple uses are OK
// here, we're not going to remove the operand we find.
- SDValue Input = Op.getOperand(0);
- while (Input.getOpcode() == ISD::BITCAST)
- Input = Input.getOperand(0);
+ Input = peekThroughBitcasts(Input);
MVT VT = Input.getSimpleValueType();
MVT RootVT = Root.getSimpleValueType();
SDLoc DL(Root);
- if (Mask.size() == 1) {
- int Index = Mask[0];
- assert((Index >= 0 || Index == SM_SentinelUndef ||
- Index == SM_SentinelZero) &&
- "Invalid shuffle index found!");
-
- // We may end up with an accumulated mask of size 1 as a result of
- // widening of shuffle operands (see function canWidenShuffleElements).
- // If the only shuffle index is equal to SM_SentinelZero then propagate
- // a zero vector. Otherwise, the combine shuffle mask is a no-op shuffle
- // mask, and therefore the entire chain of shuffles can be folded away.
- if (Index == SM_SentinelZero)
- DCI.CombineTo(Root.getNode(), getZeroVector(RootVT, Subtarget, DAG, DL));
- else
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Input),
- /*AddTo*/ true);
+ SDValue Res;
+
+ unsigned NumBaseMaskElts = BaseMask.size();
+ if (NumBaseMaskElts == 1) {
+ assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Input),
+ /*AddTo*/ true);
return true;
}
- // Use the float domain if the operand type is a floating point type.
- bool FloatDomain = VT.isFloatingPoint();
+ unsigned RootSizeInBits = RootVT.getSizeInBits();
+ unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
- // For floating point shuffles, we don't have free copies in the shuffle
- // instructions or the ability to load as part of the instruction, so
- // canonicalize their shuffles to UNPCK or MOV variants.
- //
- // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
- // vectors because it can have a load folded into it that UNPCK cannot. This
- // doesn't preclude something switching to the shorter encoding post-RA.
- //
- // FIXME: Should teach these routines about AVX vector widths.
- if (FloatDomain && VT.is128BitVector()) {
- if (Mask.equals({0, 0}) || Mask.equals({1, 1})) {
- bool Lo = Mask.equals({0, 0});
- unsigned Shuffle;
- MVT ShuffleVT;
- // Check if we have SSE3 which will let us use MOVDDUP. That instruction
- // is no slower than UNPCKLPD but has the option to fold the input operand
- // into even an unaligned memory load.
- if (Lo && Subtarget->hasSSE3()) {
- Shuffle = X86ISD::MOVDDUP;
- ShuffleVT = MVT::v2f64;
- } else {
- // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
- // than the UNPCK variants.
- Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
- ShuffleVT = MVT::v4f32;
- }
- if (Depth == 1 && Root->getOpcode() == Shuffle)
- return false; // Nothing to do!
- Op = DAG.getBitcast(ShuffleVT, Input);
- DCI.AddToWorklist(Op.getNode());
- if (Shuffle == X86ISD::MOVDDUP)
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
- else
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
- /*AddTo*/ true);
- return true;
- }
- if (Subtarget->hasSSE3() &&
- (Mask.equals({0, 0, 2, 2}) || Mask.equals({1, 1, 3, 3}))) {
- bool Lo = Mask.equals({0, 0, 2, 2});
- unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
- MVT ShuffleVT = MVT::v4f32;
- if (Depth == 1 && Root->getOpcode() == Shuffle)
- return false; // Nothing to do!
- Op = DAG.getBitcast(ShuffleVT, Input);
- DCI.AddToWorklist(Op.getNode());
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
+  // Don't combine if we are an AVX512/EVEX target and the mask element size
+ // is different from the root element size - this would prevent writemasks
+ // from being reused.
+ // TODO - this currently prevents all lane shuffles from occurring.
+ // TODO - check for writemasks usage instead of always preventing combining.
+ // TODO - attempt to narrow Mask back to writemask size.
+ if (RootVT.getScalarSizeInBits() != BaseMaskEltSizeInBits &&
+ (RootSizeInBits == 512 ||
+ (Subtarget.hasVLX() && RootSizeInBits >= 128))) {
+ return false;
+ }
+
+ // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
+
+ // Handle 128-bit lane shuffles of 256-bit vectors.
+ if (VT.is256BitVector() && NumBaseMaskElts == 2 &&
+ !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
+ if (Depth == 1 && Root.getOpcode() == X86ISD::VPERM2X128)
+ return false; // Nothing to do!
+ MVT ShuffleVT = (VT.isFloatingPoint() || !Subtarget.hasAVX2() ? MVT::v4f64
+ : MVT::v4i64);
+ unsigned PermMask = 0;
+ PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
+ PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
+
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
+ DAG.getUNDEF(ShuffleVT),
+ DAG.getConstant(PermMask, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ // For masks that have been widened to 128-bit elements or more,
+ // narrow back down to 64-bit elements.
+ SmallVector<int, 64> Mask;
+ if (BaseMaskEltSizeInBits > 64) {
+ assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
+ int MaskScale = BaseMaskEltSizeInBits / 64;
+ scaleShuffleMask(MaskScale, BaseMask, Mask);
+ } else {
+ Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
+ }
+
+ unsigned NumMaskElts = Mask.size();
+ unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
+
+ // Determine the effective mask value type.
+ bool FloatDomain =
+ (VT.isFloatingPoint() || (VT.is256BitVector() && !Subtarget.hasAVX2())) &&
+ (32 <= MaskEltSizeInBits);
+ MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
+ : MVT::getIntegerVT(MaskEltSizeInBits);
+ MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
+
+ // Attempt to match the mask against known shuffle patterns.
+ MVT ShuffleVT;
+ unsigned Shuffle, PermuteImm;
+
+ if (matchUnaryVectorShuffle(VT, Mask, Subtarget, Shuffle, ShuffleVT)) {
+ if (Depth == 1 && Root.getOpcode() == Shuffle)
+ return false; // Nothing to do!
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ if (matchPermuteVectorShuffle(VT, Mask, Subtarget, Shuffle, ShuffleVT,
+ PermuteImm)) {
+ if (Depth == 1 && Root.getOpcode() == Shuffle)
+ return false; // Nothing to do!
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
+ DAG.getConstant(PermuteImm, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ if (matchBinaryVectorShuffle(VT, Mask, Shuffle, ShuffleVT)) {
+ if (Depth == 1 && Root.getOpcode() == Shuffle)
+ return false; // Nothing to do!
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res, Res);
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ // Attempt to blend with zero.
+ if (NumMaskElts <= 8 &&
+ ((Subtarget.hasSSE41() && VT.is128BitVector()) ||
+ (Subtarget.hasAVX() && VT.is256BitVector()))) {
+ // Convert VT to a type compatible with X86ISD::BLENDI.
+ // TODO - add 16i16 support (requires lane duplication).
+ MVT ShuffleVT = MaskVT;
+ if (Subtarget.hasAVX2()) {
+ if (ShuffleVT == MVT::v4i64)
+ ShuffleVT = MVT::v8i32;
+ else if (ShuffleVT == MVT::v2i64)
+ ShuffleVT = MVT::v4i32;
+ } else {
+ if (ShuffleVT == MVT::v2i64 || ShuffleVT == MVT::v4i32)
+ ShuffleVT = MVT::v8i16;
+ else if (ShuffleVT == MVT::v4i64)
+ ShuffleVT = MVT::v4f64;
+ else if (ShuffleVT == MVT::v8i32)
+ ShuffleVT = MVT::v8f32;
+ }
+
+ if (isSequentialOrUndefOrZeroInRange(Mask, /*Pos*/ 0, /*Size*/ NumMaskElts,
+ /*Low*/ 0) &&
+ NumMaskElts <= ShuffleVT.getVectorNumElements()) {
+ unsigned BlendMask = 0;
+ unsigned ShuffleSize = ShuffleVT.getVectorNumElements();
+ unsigned MaskRatio = ShuffleSize / NumMaskElts;
+
+ if (Depth == 1 && Root.getOpcode() == X86ISD::BLENDI)
+ return false;
+
+ for (unsigned i = 0; i != ShuffleSize; ++i)
+ if (Mask[i / MaskRatio] < 0)
+ BlendMask |= 1u << i;
+
+ SDValue Zero = getZeroVector(ShuffleVT, Subtarget, DAG, DL);
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(X86ISD::BLENDI, DL, ShuffleVT, Res, Zero,
+ DAG.getConstant(BlendMask, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
/*AddTo*/ true);
return true;
}
- if (Mask.equals({0, 0, 1, 1}) || Mask.equals({2, 2, 3, 3})) {
- bool Lo = Mask.equals({0, 0, 1, 1});
- unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
- MVT ShuffleVT = MVT::v4f32;
- if (Depth == 1 && Root->getOpcode() == Shuffle)
+ }
+
+ // Attempt to combine to INSERTPS.
+ if (Subtarget.hasSSE41() && NumMaskElts == 4 &&
+ (VT == MVT::v2f64 || VT == MVT::v4f32)) {
+ SmallBitVector Zeroable(4, false);
+ for (unsigned i = 0; i != NumMaskElts; ++i)
+ if (Mask[i] < 0)
+ Zeroable[i] = true;
+
+ unsigned InsertPSMask;
+ SDValue V1 = Input, V2 = Input;
+ if (Zeroable.any() && matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask,
+ Zeroable, Mask, DAG)) {
+ if (Depth == 1 && Root.getOpcode() == X86ISD::INSERTPS)
return false; // Nothing to do!
- Op = DAG.getBitcast(ShuffleVT, Input);
- DCI.AddToWorklist(Op.getNode());
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
+ V1 = DAG.getBitcast(MVT::v4f32, V1);
+ DCI.AddToWorklist(V1.getNode());
+ V2 = DAG.getBitcast(MVT::v4f32, V2);
+ DCI.AddToWorklist(V2.getNode());
+ Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
/*AddTo*/ true);
return true;
}
}
- // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
- // variants as none of these have single-instruction variants that are
- // superior to the UNPCK formulation.
- if (!FloatDomain && VT.is128BitVector() &&
- (Mask.equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
- Mask.equals({4, 4, 5, 5, 6, 6, 7, 7}) ||
- Mask.equals({0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}) ||
- Mask.equals(
- {8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}))) {
- bool Lo = Mask[0] == 0;
- unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
- if (Depth == 1 && Root->getOpcode() == Shuffle)
- return false; // Nothing to do!
- MVT ShuffleVT;
- switch (Mask.size()) {
- case 8:
- ShuffleVT = MVT::v8i16;
- break;
- case 16:
- ShuffleVT = MVT::v16i8;
- break;
- default:
- llvm_unreachable("Impossible mask size!");
- };
- Op = DAG.getBitcast(ShuffleVT, Input);
- DCI.AddToWorklist(Op.getNode());
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
- /*AddTo*/ true);
- return true;
- }
-
// Don't try to re-form single instruction chains under any circumstances now
// that we've done encoding canonicalization for them.
if (Depth < 2)
return false;
- // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
- // can replace them with a single PSHUFB instruction profitably. Intel's
- // manuals suggest only using PSHUFB if doing so replacing 5 instructions, but
- // in practice PSHUFB tends to be *very* fast so we're more aggressive.
- if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
+ if (is128BitLaneCrossingShuffleMask(MaskVT, Mask))
+ return false;
+
+ bool MaskContainsZeros =
+ llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
+
+  // If we have a single input shuffle with different shuffle patterns in the
+  // 128-bit lanes, use the variable mask to VPERMILPS.
+  // TODO: Combine other mask types at higher depths.
+ if (HasVariableMask && !MaskContainsZeros &&
+ ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
+ (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
+ SmallVector<SDValue, 16> VPermIdx;
+ for (int M : Mask) {
+ SDValue Idx =
+ M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
+ VPermIdx.push_back(Idx);
+ }
+ MVT VPermMaskVT = MVT::getVectorVT(MVT::i32, NumMaskElts);
+ SDValue VPermMask = DAG.getBuildVector(VPermMaskVT, DL, VPermIdx);
+ DCI.AddToWorklist(VPermMask.getNode());
+ Res = DAG.getBitcast(MaskVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ // If we have 3 or more shuffle instructions or a chain involving a variable
+ // mask, we can replace them with a single PSHUFB instruction profitably.
+  // Intel's manuals suggest only using PSHUFB if doing so replaces 5
+ // instructions, but in practice PSHUFB tends to be *very* fast so we're
+ // more aggressive.
+ if ((Depth >= 3 || HasVariableMask) &&
+ ((VT.is128BitVector() && Subtarget.hasSSSE3()) ||
+ (VT.is256BitVector() && Subtarget.hasAVX2()) ||
+ (VT.is512BitVector() && Subtarget.hasBWI()))) {
SmallVector<SDValue, 16> PSHUFBMask;
int NumBytes = VT.getSizeInBits() / 8;
- int Ratio = NumBytes / Mask.size();
+ int Ratio = NumBytes / NumMaskElts;
for (int i = 0; i < NumBytes; ++i) {
- if (Mask[i / Ratio] == SM_SentinelUndef) {
+ int M = Mask[i / Ratio];
+ if (M == SM_SentinelUndef) {
PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
continue;
}
- int M = Mask[i / Ratio] != SM_SentinelZero
- ? Ratio * Mask[i / Ratio] + i % Ratio
- : 255;
+ if (M == SM_SentinelZero) {
+ PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
+ continue;
+ }
+ M = Ratio * M + i % Ratio;
+      assert((M / 16) == (i / 16) && "Lane crossing detected");
PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
- Op = DAG.getBitcast(ByteVT, Input);
- DCI.AddToWorklist(Op.getNode());
- SDValue PSHUFBMaskOp =
- DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVT, PSHUFBMask);
+ Res = DAG.getBitcast(ByteVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
DCI.AddToWorklist(PSHUFBMaskOp.getNode());
- Op = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Op, PSHUFBMaskOp);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
+ Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
/*AddTo*/ true);
return true;
}
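
The PSHUFB path expands each wide mask element into Ratio consecutive byte indices and turns the zero sentinel into 255 (any byte index with its top bit set makes PSHUFB emit a zero byte). A standalone model of that expansion, with undef handling omitted:

    #include <cassert>
    #include <vector>

    // Each wide mask element covers Ratio bytes; -2 mirrors SM_SentinelZero.
    std::vector<int> buildPSHUFBMask(const std::vector<int> &Mask,
                                     int NumBytes) {
      const int SM_SentinelZero = -2;
      int Ratio = NumBytes / int(Mask.size());
      std::vector<int> Bytes;
      for (int i = 0; i < NumBytes; ++i) {
        int M = Mask[i / Ratio];
        Bytes.push_back(M == SM_SentinelZero ? 255 : Ratio * M + i % Ratio);
      }
      return Bytes;
    }

    int main() {
      // v4i32-style mask over 16 bytes: element 1 -> bytes 4..7, zero -> 255s.
      auto B = buildPSHUFBMask({1, -2, 2, 3}, 16);
      assert(B[0] == 4 && B[3] == 7 && B[4] == 255 && B[15] == 15);
    }
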
@@ -23288,10 +25333,10 @@ static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
/// combining in this recursive walk.
static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
ArrayRef<int> RootMask,
- int Depth, bool HasPSHUFB,
+ int Depth, bool HasVariableMask,
SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
// Bound the depth of our recursive combine because this is ultimately
// quadratic in nature.
if (Depth > 8)
@@ -23310,13 +25355,10 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
"Can only combine shuffles of the same vector register size.");
- if (!isTargetShuffle(Op.getOpcode()))
- return false;
+ // Extract target shuffle mask and resolve sentinels and inputs.
+ SDValue Input0, Input1;
SmallVector<int, 16> OpMask;
- bool IsUnary;
- bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, true, OpMask, IsUnary);
- // We only can combine unary shuffles which we can decode the mask for.
- if (!HaveMask || !IsUnary)
+ if (!resolveTargetShuffleInputs(Op, Input0, Input1, OpMask))
return false;
assert(VT.getVectorNumElements() == OpMask.size() &&
@@ -23327,6 +25369,7 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
OpMask.size() % RootMask.size() == 0) ||
OpMask.size() == RootMask.size()) &&
"The smaller number of elements must divide the larger.");
+ int MaskWidth = std::max<int>(OpMask.size(), RootMask.size());
int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
assert(((RootRatio == 1 && OpRatio == 1) ||
@@ -23334,13 +25377,13 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
"Must not have a ratio for both incoming and op masks!");
SmallVector<int, 16> Mask;
- Mask.reserve(std::max(OpMask.size(), RootMask.size()));
+ Mask.reserve(MaskWidth);
// Merge this shuffle operation's mask into our accumulated mask. Note that
// this shuffle's mask will be the first applied to the input, followed by the
// root mask to get us all the way to the root value arrangement. The reason
// for this order is that we are recursing up the operation chain.
- for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
+ for (int i = 0; i < MaskWidth; ++i) {
int RootIdx = i / RootRatio;
if (RootMask[RootIdx] < 0) {
// This is a zero or undef lane, we're done.
@@ -23362,45 +25405,56 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
RootMaskedIdx % OpRatio);
}
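A small worked composition under the formula above (illustrative masks, not from the patch), with a two-element root mask applied on top of a four-element operand mask, so RootRatio = 2 and OpRatio = 1:

//   RootMask = <1, 0> (v2i64 view), OpMask = <2, 3, 0, 1> (v4i32):
//   i = 0: RootMaskedIdx = 1*2 + 0 = 2  ->  Mask[0] = OpMask[2] = 0
//   i = 1: RootMaskedIdx = 1*2 + 1 = 3  ->  Mask[1] = OpMask[3] = 1
//   i = 2: RootMaskedIdx = 0*2 + 0 = 0  ->  Mask[2] = OpMask[0] = 2
//   i = 3: RootMaskedIdx = 0*2 + 1 = 1  ->  Mask[3] = OpMask[1] = 3
//   The two shuffles cancel: the accumulated mask is the identity <0, 1, 2, 3>.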
- // See if we can recurse into the operand to combine more things.
- switch (Op.getOpcode()) {
- case X86ISD::PSHUFB:
- HasPSHUFB = true;
- case X86ISD::PSHUFD:
- case X86ISD::PSHUFHW:
- case X86ISD::PSHUFLW:
- if (Op.getOperand(0).hasOneUse() &&
- combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
- HasPSHUFB, DAG, DCI, Subtarget))
- return true;
- break;
+ // Handle the all undef/zero cases early.
+ if (llvm::all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
+ DCI.CombineTo(Root.getNode(), DAG.getUNDEF(Root.getValueType()));
+ return true;
+ }
+ if (llvm::all_of(Mask, [](int Idx) { return Idx < 0; })) {
+ // TODO - should we handle the mixed zero/undef case as well? Just returning
+    // a zero mask will lose information on undef elements, possibly reducing
+ // future combine possibilities.
+ DCI.CombineTo(Root.getNode(), getZeroVector(Root.getSimpleValueType(),
+ Subtarget, DAG, SDLoc(Root)));
+ return true;
+ }
- case X86ISD::UNPCKL:
- case X86ISD::UNPCKH:
- assert(Op.getOperand(0) == Op.getOperand(1) &&
- "We only combine unary shuffles!");
- // We can't check for single use, we have to check that this shuffle is the
- // only user.
- if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
- combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
- HasPSHUFB, DAG, DCI, Subtarget))
- return true;
- break;
+ int MaskSize = Mask.size();
+ bool UseInput0 = std::any_of(Mask.begin(), Mask.end(),
+ [MaskSize](int Idx) { return 0 <= Idx && Idx < MaskSize; });
+ bool UseInput1 = std::any_of(Mask.begin(), Mask.end(),
+ [MaskSize](int Idx) { return MaskSize <= Idx; });
+
+ // At the moment we can only combine unary shuffle mask cases.
+ if (UseInput0 && UseInput1)
+ return false;
+ else if (UseInput1) {
+ std::swap(Input0, Input1);
+ ShuffleVectorSDNode::commuteMask(Mask);
}
+ assert(Input0 && "Shuffle with no inputs detected");
+
+ HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode());
+
+ // See if we can recurse into Input0 (if it's a target shuffle).
+ if (Op->isOnlyUserOf(Input0.getNode()) &&
+ combineX86ShufflesRecursively(Input0, Root, Mask, Depth + 1,
+ HasVariableMask, DAG, DCI, Subtarget))
+ return true;
+
// Minor canonicalization of the accumulated shuffle mask to make it easier
- // to match below. All this does is detect masks with squential pairs of
+ // to match below. All this does is detect masks with sequential pairs of
// elements, and shrink them to the half-width mask. It does this in a loop
// so it will reduce the size of the mask to the minimal width mask which
// performs an equivalent shuffle.
SmallVector<int, 16> WidenedMask;
while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
Mask = std::move(WidenedMask);
- WidenedMask.clear();
}
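For example (illustrative, not from the patch), the widening loop detects sequential pairs and halves the mask until that fails:

//   <2, 3, 0, 1> -> <1, 0>   (pair (2,3) is wide element 1, pair (0,1) is 0)
//   <1, 0> does not widen further: the pair (1, 0) is not sequential.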
- return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
- Subtarget);
+ return combineX86ShuffleChain(Input0, Root, Mask, Depth, HasVariableMask, DAG,
+ DCI, Subtarget);
}
/// \brief Get the PSHUF-style mask from PSHUF node.
@@ -23410,8 +25464,10 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
MVT VT = N.getSimpleValueType();
SmallVector<int, 4> Mask;
+ SmallVector<SDValue, 2> Ops;
bool IsUnary;
- bool HaveMask = getTargetShuffleMask(N.getNode(), VT, false, Mask, IsUnary);
+ bool HaveMask =
+ getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
(void)HaveMask;
assert(HaveMask);
@@ -23647,9 +25703,9 @@ static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
}
/// \brief Try to combine x86 target specific shuffles.
-static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
MVT VT = N.getSimpleValueType();
SmallVector<int, 4> Mask;
@@ -23681,8 +25737,7 @@ static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
auto Op0 = N.getOperand(0);
auto Op1 = N.getOperand(1);
- if (Op0.getOpcode() == ISD::UNDEF &&
- Op1.getNode()->getOpcode() == ISD::VECTOR_SHUFFLE) {
+ if (Op0.isUndef() && Op1.getNode()->getOpcode() == ISD::VECTOR_SHUFFLE) {
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op1.getNode())->getMask();
unsigned NumElts = VT.getVectorNumElements();
@@ -23719,6 +25774,129 @@ static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V0, NewMask);
}
+ // Attempt to merge blend(insertps(x,y),zero).
+ if (V0.getOpcode() == X86ISD::INSERTPS ||
+ V1.getOpcode() == X86ISD::INSERTPS) {
+ assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
+
+ // Determine which elements are known to be zero.
+ SmallVector<int, 8> TargetMask;
+ SmallVector<SDValue, 2> BlendOps;
+ if (!setTargetShuffleZeroElements(N, TargetMask, BlendOps))
+ return SDValue();
+
+    // Helper function to take the inner insertps node and attempt to
+    // merge the blend-with-zero into its zero mask.
+ auto MergeInsertPSAndBlend = [&](SDValue V, int Offset) {
+ if (V.getOpcode() != X86ISD::INSERTPS)
+ return SDValue();
+ SDValue Op0 = V.getOperand(0);
+ SDValue Op1 = V.getOperand(1);
+ SDValue Op2 = V.getOperand(2);
+ unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
+
+      // Check each element of the blend node's target mask - it must either
+      // be zeroable (and update the zero mask) or select the element from
+      // the inner insertps node.
+ for (int i = 0; i != 4; ++i)
+ if (TargetMask[i] < 0)
+ InsertPSMask |= (1u << i);
+ else if (TargetMask[i] != (i + Offset))
+ return SDValue();
+ return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, Op0, Op1,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+ };
+
+ if (SDValue V = MergeInsertPSAndBlend(V0, 0))
+ return V;
+ if (SDValue V = MergeInsertPSAndBlend(V1, 4))
+ return V;
+ }
+ return SDValue();
+ }
+ case X86ISD::INSERTPS: {
+ assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
+ SDValue Op0 = N.getOperand(0);
+ SDValue Op1 = N.getOperand(1);
+ SDValue Op2 = N.getOperand(2);
+ unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
+ unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
+ unsigned ZeroMask = InsertPSMask & 0xF;
+
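The three fields decoded above follow the architectural INSERTPS immediate layout; a worked decode with an illustrative value (not from the patch):

//   imm8 bits [7:6] = SrcIdx, bits [5:4] = DstIdx, bits [3:0] = ZeroMask.
//   InsertPSMask = 0x61 = 0b01'10'0001: take Op1[1], insert it at element 2
//   of Op0, and zero element 0 of the result.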
+ // If we zero out all elements from Op0 then we don't need to reference it.
+ if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+
+ // If we zero out the element from Op1 then we don't need to reference it.
+ if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+
+ // Attempt to merge insertps Op1 with an inner target shuffle node.
+ SmallVector<int, 8> TargetMask1;
+ SmallVector<SDValue, 2> Ops1;
+ if (setTargetShuffleZeroElements(Op1, TargetMask1, Ops1)) {
+ int M = TargetMask1[SrcIdx];
+ if (isUndefOrZero(M)) {
+ // Zero/UNDEF insertion - zero out element and remove dependency.
+ InsertPSMask |= (1u << DstIdx);
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+ }
+ // Update insertps mask srcidx and reference the source input directly.
+ assert(0 <= M && M < 8 && "Shuffle index out of range");
+ InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
+ Op1 = Ops1[M < 4 ? 0 : 1];
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+ }
+
+ // Attempt to merge insertps Op0 with an inner target shuffle node.
+ SmallVector<int, 8> TargetMask0;
+ SmallVector<SDValue, 2> Ops0;
+ if (!setTargetShuffleZeroElements(Op0, TargetMask0, Ops0))
+ return SDValue();
+
+ bool Updated = false;
+ bool UseInput00 = false;
+ bool UseInput01 = false;
+ for (int i = 0; i != 4; ++i) {
+ int M = TargetMask0[i];
+ if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
+ // No change if element is already zero or the inserted element.
+ continue;
+ } else if (isUndefOrZero(M)) {
+ // If the target mask is undef/zero then we must zero the element.
+ InsertPSMask |= (1u << i);
+ Updated = true;
+ continue;
+ }
+
+      // The input vector element must stay in place (index i from either input).
+ if (M != i && M != (i + 4))
+ return SDValue();
+
+ // Determine which inputs of the target shuffle we're using.
+ UseInput00 |= (0 <= M && M < 4);
+ UseInput01 |= (4 <= M);
+ }
+
+ // If we're not using both inputs of the target shuffle then use the
+ // referenced input directly.
+ if (UseInput00 && !UseInput01) {
+ Updated = true;
+ Op0 = Ops0[0];
+ } else if (!UseInput00 && UseInput01) {
+ Updated = true;
+ Op0 = Ops0[1];
+ }
+
+ if (Updated)
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+
return SDValue();
}
default:
@@ -23814,12 +25992,12 @@ static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
/// the operands, which explicitly discard the lanes that are unused by this
/// operation, so that the rest of the combiner can see that they're unused.
-static SDValue combineShuffleToAddSub(SDNode *N, const X86Subtarget *Subtarget,
+static SDValue combineShuffleToAddSub(SDNode *N, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
- if ((!Subtarget->hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
- (!Subtarget->hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
+ if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
+ (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
return SDValue();
// We only handle target-independent shuffles.
@@ -23865,13 +26043,10 @@ static SDValue combineShuffleToAddSub(SDNode *N, const X86Subtarget *Subtarget,
return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
}
-/// PerformShuffleCombine - Performs several different shuffle combines.
-static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc dl(N);
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// Don't create instructions with illegal types after legalize types has run.
@@ -23886,9 +26061,9 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
return AddSub;
// Combine 256-bit vector shuffles. This is only profitable when in AVX mode
- if (TLI.isTypeLegal(VT) && Subtarget->hasFp256() && VT.is256BitVector() &&
+ if (TLI.isTypeLegal(VT) && Subtarget.hasFp256() && VT.is256BitVector() &&
N->getOpcode() == ISD::VECTOR_SHUFFLE)
- return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
+ return combineShuffle256(N, DAG, DCI, Subtarget);
// During Type Legalization, when promoting illegal vector types,
// the backend might introduce new shuffle dag nodes and bitcasts.
@@ -23903,8 +26078,12 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
// potentially need to be further expanded (or custom lowered) into a
// less optimal sequence of dag nodes.
if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
- N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
- N0.getOpcode() == ISD::BITCAST) {
+ N->getOpcode() == ISD::VECTOR_SHUFFLE &&
+ N->getOperand(0).getOpcode() == ISD::BITCAST &&
+ N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
SDValue BC0 = N0.getOperand(0);
EVT SVT = BC0.getValueType();
unsigned Opcode = BC0.getOpcode();
@@ -23936,7 +26115,7 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0));
SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1));
SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
- return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
+ return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask());
}
}
}
@@ -23952,9 +26131,8 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
return LD;
if (isTargetShuffle(N->getOpcode())) {
- SDValue Shuffle =
- PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
- if (Shuffle.getNode())
+ if (SDValue Shuffle =
+ combineTargetShuffle(SDValue(N, 0), DAG, DCI, Subtarget))
return Shuffle;
// Try recursively combining arbitrary sequences of x86 shuffle
@@ -23973,8 +26151,8 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
-/// specific shuffle of a load can be folded into a single element load.
+/// Check if a vector extract from a target-specific shuffle of a load can be
+/// folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
@@ -24012,9 +26190,10 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
return SDValue();
SmallVector<int, 16> ShuffleMask;
+ SmallVector<SDValue, 2> ShuffleOps;
bool UnaryShuffle;
if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
- ShuffleMask, UnaryShuffle))
+ ShuffleOps, ShuffleMask, UnaryShuffle))
return SDValue();
// Select the input vector, guarding against out of range extract vector.
@@ -24029,12 +26208,12 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
return DAG.getUNDEF(EltVT);
assert(0 <= Idx && Idx < (int)(2 * NumElems) && "Shuffle index out of range");
- SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
- : InVec.getOperand(1);
+ SDValue LdNode = (Idx < (int)NumElems) ? ShuffleOps[0]
+ : ShuffleOps[1];
// If inputs to shuffle are the same for both ops, then allow 2 uses
- unsigned AllowedUses = InVec.getNumOperands() > 1 &&
- InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
+ unsigned AllowedUses =
+ (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
if (LdNode.getOpcode() == ISD::BITCAST) {
// Don't duplicate a load with other uses.
@@ -24068,18 +26247,16 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
SDLoc dl(N);
// Create a shuffle node, taking into account the case that it's a unary shuffle
- SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
- : InVec.getOperand(1);
- Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
- InVec.getOperand(0), Shuffle,
- &ShuffleMask[0]);
+ SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT) : ShuffleOps[1];
+ Shuffle = DAG.getVectorShuffle(CurrentVT, dl, ShuffleOps[0], Shuffle,
+ ShuffleMask);
Shuffle = DAG.getBitcast(OriginalVT, Shuffle);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
EltNo);
}
-static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -24108,8 +26285,8 @@ static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
default: return SDValue();
}
- if (((Subtarget->hasSSE1() && VT == MVT::f32) ||
- (Subtarget->hasSSE2() && VT == MVT::f64)) &&
+ if (((Subtarget.hasSSE1() && VT == MVT::f32) ||
+ (Subtarget.hasSSE2() && VT == MVT::f64)) &&
isa<ConstantSDNode>(N0.getOperand(1)) &&
N0.getOperand(0).getOpcode() == ISD::BITCAST &&
N0.getOperand(0).getOperand(0).getValueType() == VT) {
@@ -24121,13 +26298,12 @@ static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
-/// generation and convert it from being a bunch of shuffles and extracts
-/// into a somewhat faster sequence. For i686, the best sequence is apparently
-/// storing the value and loading scalars back, while for x64 we should
-/// use 64-bit extracts and shifts.
-static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+/// Detect vector gather/scatter index generation and convert it from being a
+/// bunch of shuffles and extracts into a somewhat faster sequence.
+/// For i686, the best sequence is apparently storing the value and loading
+/// scalars back, while for x64 we should use 64-bit extracts and shifts.
+static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
return NewOp;
@@ -24136,25 +26312,14 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
// Detect mmx to i32 conversion through a v2i32 elt extract.
if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
N->getValueType(0) == MVT::i32 &&
- InputVector.getValueType() == MVT::v2i32) {
+ InputVector.getValueType() == MVT::v2i32 &&
+ isa<ConstantSDNode>(N->getOperand(1)) &&
+ N->getConstantOperandVal(1) == 0) {
+ SDValue MMXSrc = InputVector.getNode()->getOperand(0);
// The bitcast source is a direct mmx result.
- SDValue MMXSrc = InputVector.getNode()->getOperand(0);
if (MMXSrc.getValueType() == MVT::x86mmx)
- return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
- N->getValueType(0),
- InputVector.getNode()->getOperand(0));
-
- // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
- if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
- MMXSrc.getValueType() == MVT::i64) {
- SDValue MMXSrcOp = MMXSrc.getOperand(0);
- if (MMXSrcOp.hasOneUse() && MMXSrcOp.getOpcode() == ISD::BITCAST &&
- MMXSrcOp.getValueType() == MVT::v1i64 &&
- MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
- return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
- N->getValueType(0), MMXSrcOp.getOperand(0));
- }
+ return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
}
EVT VT = N->getValueType(0);
@@ -24236,7 +26401,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
// Store the value to a temporary stack slot.
SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo());
EVT ElementType = InputVector.getValueType().getVectorElementType();
unsigned EltSize = ElementType.getSizeInBits() / 8;
@@ -24251,10 +26416,8 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, OffsetVal);
// Load the scalar.
- Vals[i] = DAG.getLoad(ElementType, dl, Ch,
- ScalarAddr, MachinePointerInfo(),
- false, false, false, 0);
-
+ Vals[i] =
+ DAG.getLoad(ElementType, dl, Ch, ScalarAddr, MachinePointerInfo());
}
}
@@ -24272,55 +26435,10 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue
-transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
- SDLoc dl(N);
- SDValue Cond = N->getOperand(0);
- SDValue LHS = N->getOperand(1);
- SDValue RHS = N->getOperand(2);
-
- if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
- SDValue CondSrc = Cond->getOperand(0);
- if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
- Cond = CondSrc->getOperand(0);
- }
-
- if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
- return SDValue();
-
- // A vselect where all conditions and data are constants can be optimized into
- // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
- if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
- ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
- return SDValue();
-
- unsigned MaskValue = 0;
- if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
- return SDValue();
-
- MVT VT = N->getSimpleValueType(0);
- unsigned NumElems = VT.getVectorNumElements();
- SmallVector<int, 8> ShuffleMask(NumElems, -1);
- for (unsigned i = 0; i < NumElems; ++i) {
- // Be sure we emit undef where we can.
- if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
- ShuffleMask[i] = -1;
- else
- ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
- }
-
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
- return SDValue();
- return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
-}
-
-/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
-/// nodes.
-static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// Do target-specific dag combines on SELECT and VSELECT nodes.
+static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
SDValue Cond = N->getOperand(0);
// Get the LHS/RHS of the select.
@@ -24337,8 +26455,8 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
VT != MVT::f80 && VT != MVT::f128 &&
(TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
- (Subtarget->hasSSE2() ||
- (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
+ (Subtarget.hasSSE2() ||
+ (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
unsigned Opcode = 0;
@@ -24476,7 +26594,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
}
EVT CondVT = Cond.getValueType();
- if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
+ if (Subtarget.hasAVX512() && VT.isVector() && CondVT.isVector() &&
CondVT.getVectorElementType() == MVT::i1) {
// v16i8 (select v16i1, v16i8, v16i8) does not have a proper
// lowering on KNL. In this case we convert it to
@@ -24487,7 +26605,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
(OpVT.getVectorElementType() == MVT::i8 ||
OpVT.getVectorElementType() == MVT::i16) &&
- !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
+ !(Subtarget.hasBWI() && Subtarget.hasVLX())) {
Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
DCI.AddToWorklist(Cond.getNode());
return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
@@ -24625,8 +26743,8 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// Match VSELECTs into subs with unsigned saturation.
if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
// psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
- ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
- (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
+ ((Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
+ (Subtarget.hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
// Check if one of the arms of the VSELECT is a zero vector. If it's on the
@@ -24730,25 +26848,6 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
}
}
- // We should generate an X86ISD::BLENDI from a vselect if its argument
- // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
- // constants. This specific pattern gets generated when we split a
- // selector for a 512 bit vector in a machine without AVX512 (but with
- // 256-bit vectors), during legalization:
- //
- // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
- //
- // Iff we find this pattern and the build_vectors are built from
- // constants, we translate the vselect into a shuffle_vector that we
- // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
- if ((N->getOpcode() == ISD::VSELECT ||
- N->getOpcode() == X86ISD::SHRUNKBLEND) &&
- !DCI.isBeforeLegalize() && !VT.is512BitVector()) {
- SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
- if (Shuffle.getNode())
- return Shuffle;
- }
-
// If this is a *dynamic* select (non-constant condition) and we can match
// this node with one of the variable blend instructions, restructure the
// condition so that the blends can use the high bit of each element and use
@@ -24780,10 +26879,10 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (VT.getVectorElementType() == MVT::i16)
return SDValue();
// Dynamic blending was only available from SSE4.1 onward.
- if (VT.is128BitVector() && !Subtarget->hasSSE41())
+ if (VT.is128BitVector() && !Subtarget.hasSSE41())
return SDValue();
// Byte blends are only available in AVX2
- if (VT == MVT::v32i8 && !Subtarget->hasAVX2())
+ if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
return SDValue();
assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
@@ -24837,6 +26936,73 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+/// Combine:
+/// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
+/// to:
+/// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
+/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
+/// Note that this is only legal for some op/cc combinations.
+static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
+ SelectionDAG &DAG) {
+ // This combine only operates on CMP-like nodes.
+ if (!(Cmp.getOpcode() == X86ISD::CMP ||
+ (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
+ return SDValue();
+
+ // This only applies to variations of the common case:
+ // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
+ // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
+ // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
+ // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
+  // The proper condcodes (see below) ensure that overflow is accounted for.
+
+ // FIXME: We can generalize both constraints:
+ // - XOR/OR/AND (if they were made to survive AtomicExpand)
+ // - LHS != 1
+ // if the result is compared.
+
+ SDValue CmpLHS = Cmp.getOperand(0);
+ SDValue CmpRHS = Cmp.getOperand(1);
+
+ if (!CmpLHS.hasOneUse())
+ return SDValue();
+
+ auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
+ if (!CmpRHSC || CmpRHSC->getZExtValue() != 0)
+ return SDValue();
+
+ const unsigned Opc = CmpLHS.getOpcode();
+
+ if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
+ return SDValue();
+
+ SDValue OpRHS = CmpLHS.getOperand(2);
+ auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
+ if (!OpRHSC)
+ return SDValue();
+
+ APInt Addend = OpRHSC->getAPIntValue();
+ if (Opc == ISD::ATOMIC_LOAD_SUB)
+ Addend = -Addend;
+
+ if (CC == X86::COND_S && Addend == 1)
+ CC = X86::COND_LE;
+ else if (CC == X86::COND_NS && Addend == 1)
+ CC = X86::COND_G;
+ else if (CC == X86::COND_G && Addend == -1)
+ CC = X86::COND_GE;
+ else if (CC == X86::COND_LE && Addend == -1)
+ CC = X86::COND_L;
+ else
+ return SDValue();
+
+ SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG);
+ DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
+ DAG.getUNDEF(CmpLHS.getValueType()));
+ DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
+ return LockOp;
+}
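Roughly, in x86 terms (an illustrative sketch, not from the patch), the combine replaces a separate test of the fetched value with a direct use of the LOCKed instruction's EFLAGS:

//   before:  lock xaddl $1, (mem) -> old ; testl old, old ; js  <target>
//   after:   lock addl  $1, (mem)        ;                  jle <target>
//   (old < 0 iff old + 1 <= 0; COND_LE reads ZF/SF/OF, so the wrap-around
//    case old == INT_MAX, which sets OF, is still decided correctly.)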
+
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
@@ -24853,10 +27019,10 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// where Op could be BRCOND or CMOV.
//
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
- // Quit if not CMP and SUB with its value result used.
- if (Cmp.getOpcode() != X86ISD::CMP &&
- (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
- return SDValue();
+ // This combine only operates on CMP-like nodes.
+ if (!(Cmp.getOpcode() == X86ISD::CMP ||
+ (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
+ return SDValue();
// Quit if not used as a boolean value.
if (CC != X86::COND_E && CC != X86::COND_NE)
@@ -24890,6 +27056,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
// Skip (zext $x), (trunc $x), (assertzext $x), or (and $x, 1) node.
while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
SetCC.getOpcode() == ISD::TRUNCATE ||
+ SetCC.getOpcode() == ISD::AssertZext ||
SetCC.getOpcode() == ISD::AND) {
if (SetCC.getOpcode() == ISD::AND) {
int OpIdx = -1;
@@ -24897,7 +27064,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
OpIdx = 1;
if (isOneConstant(SetCC.getOperand(1)))
OpIdx = 0;
- if (OpIdx == -1)
+ if (OpIdx < 0)
break;
SetCC = SetCC.getOperand(OpIdx);
truncatedToBoolWithAnd = true;
@@ -25008,10 +27175,20 @@ static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
return true;
}
+/// Optimize an EFLAGS definition used according to the condition code \p CC
+/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
+/// uses of chain values.
+static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
+ SelectionDAG &DAG) {
+ if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
+ return R;
+ return combineSetCCAtomicArith(EFLAGS, CC, DAG);
+}
+
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
-static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
// If the flag operand isn't dead, don't touch this CMOV.
@@ -25034,15 +27211,14 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
}
}
- SDValue Flags;
-
- Flags = checkBoolTestSetCCCombine(Cond, CC);
- if (Flags.getNode() &&
- // Extra check as FCMOV only supports a subset of X86 cond.
- (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
- SDValue Ops[] = { FalseOp, TrueOp,
- DAG.getConstant(CC, DL, MVT::i8), Flags };
- return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
+ // Try to simplify the EFLAGS and condition code operands.
+ // We can't always do this as FCMOV only supports a subset of X86 cond.
+ if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG)) {
+ if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
+ SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8),
+ Flags};
+ return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
+ }
}
// If this is a select between two integer constants, try to do some
@@ -25218,11 +27394,216 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// PerformMulCombine - Optimize a single multiply with constant into two
-/// in order to implement it with two cheaper instructions, e.g.
-/// LEA + SHL, LEA + LEA.
-static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+/// Different mul shrinking modes.
+enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
+
+static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
+ EVT VT = N->getOperand(0).getValueType();
+ if (VT.getScalarSizeInBits() != 32)
+ return false;
+
+ assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
+ unsigned SignBits[2] = {1, 1};
+ bool IsPositive[2] = {false, false};
+ for (unsigned i = 0; i < 2; i++) {
+ SDValue Opd = N->getOperand(i);
+
+    // DAG.ComputeNumSignBits returns 1 for ISD::ANY_EXTEND, so we need to
+ // compute signbits for it separately.
+ if (Opd.getOpcode() == ISD::ANY_EXTEND) {
+ // For anyextend, it is safe to assume an appropriate number of leading
+ // sign/zero bits.
+ if (Opd.getOperand(0).getValueType().getVectorElementType() == MVT::i8)
+ SignBits[i] = 25;
+ else if (Opd.getOperand(0).getValueType().getVectorElementType() ==
+ MVT::i16)
+ SignBits[i] = 17;
+ else
+ return false;
+ IsPositive[i] = true;
+ } else if (Opd.getOpcode() == ISD::BUILD_VECTOR) {
+ // All the operands of BUILD_VECTOR need to be int constant.
+ // Find the smallest value range which all the operands belong to.
+ SignBits[i] = 32;
+ IsPositive[i] = true;
+ for (const SDValue &SubOp : Opd.getNode()->op_values()) {
+ if (SubOp.isUndef())
+ continue;
+ auto *CN = dyn_cast<ConstantSDNode>(SubOp);
+ if (!CN)
+ return false;
+ APInt IntVal = CN->getAPIntValue();
+ if (IntVal.isNegative())
+ IsPositive[i] = false;
+ SignBits[i] = std::min(SignBits[i], IntVal.getNumSignBits());
+ }
+ } else {
+ SignBits[i] = DAG.ComputeNumSignBits(Opd);
+ if (Opd.getOpcode() == ISD::ZERO_EXTEND)
+ IsPositive[i] = true;
+ }
+ }
+
+ bool AllPositive = IsPositive[0] && IsPositive[1];
+ unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
+  // When ranges are from -128 to 127, use MULS8 mode.
+ if (MinSignBits >= 25)
+ Mode = MULS8;
+  // When ranges are from 0 to 255, use MULU8 mode.
+ else if (AllPositive && MinSignBits >= 24)
+ Mode = MULU8;
+  // When ranges are from -32768 to 32767, use MULS16 mode.
+ else if (MinSignBits >= 17)
+ Mode = MULS16;
+  // When ranges are from 0 to 65535, use MULU16 mode.
+ else if (AllPositive && MinSignBits >= 16)
+ Mode = MULU16;
+ else
+ return false;
+ return true;
+}
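The sign-bit thresholds above encode value ranges; a quick check of the arithmetic (not from the patch), for a 32-bit scalar with s = ComputeNumSignBits(v), where v fits in 32 - s + 1 signed bits:

//   s >= 25          -> v in [-128, 127]      (8 signed bits,       MULS8)
//   s >= 24, v >= 0  -> v in [0, 255]         (top 24 bits zero,    MULU8)
//   s >= 17          -> v in [-32768, 32767]  (16 signed bits,      MULS16)
//   s >= 16, v >= 0  -> v in [0, 65535]       (top 16 bits zero,    MULU16)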
+
+/// When the operands of vector mul are extended from smaller size values,
+/// like i8 and i16, the type of the mul may be shrunk to generate more
+/// efficient code. Two typical patterns are handled:
+/// Pattern1:
+/// %2 = sext/zext <N x i8> %1 to <N x i32>
+/// %4 = sext/zext <N x i8> %3 to <N x i32>
+/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
+/// %5 = mul <N x i32> %2, %4
+///
+/// Pattern2:
+/// %2 = zext/sext <N x i16> %1 to <N x i32>
+/// %4 = zext/sext <N x i16> %3 to <N x i32>
+/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
+/// %5 = mul <N x i32> %2, %4
+///
+/// There are four mul shrinking modes:
+/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
+/// -128 to 127, and the scalar value range of %4 is also -128 to 127,
+/// generate pmullw+sext32 for it (MULS8 mode).
+/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
+/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
+/// generate pmullw+zext32 for it (MULU8 mode).
+/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
+/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
+/// generate pmullw+pmulhw for it (MULS16 mode).
+/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
+/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
+/// generate pmullw+pmulhuw for it (MULU16 mode).
+static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ // pmulld is supported since SSE41. It is better to use pmulld
+ // instead of pmullw+pmulhw.
+ if (Subtarget.hasSSE41())
+ return SDValue();
+
+ ShrinkMode Mode;
+ if (!canReduceVMulWidth(N, DAG, Mode))
+ return SDValue();
+
+ SDLoc DL(N);
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N->getOperand(0).getValueType();
+ unsigned RegSize = 128;
+ MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16);
+ EVT ReducedVT =
+ EVT::getVectorVT(*DAG.getContext(), MVT::i16, VT.getVectorNumElements());
+ // Shrink the operands of mul.
+ SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
+ SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
+
+ if (VT.getVectorNumElements() >= OpsVT.getVectorNumElements()) {
+ // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
+ // lower part is needed.
+ SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
+ if (Mode == MULU8 || Mode == MULS8) {
+ return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
+ DL, VT, MulLo);
+ } else {
+ MVT ResVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
+ // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
+ // the higher part is also needed.
+ SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
+ ReducedVT, NewN0, NewN1);
+
+ // Repack the lower part and higher part result of mul into a wider
+ // result.
+ // Generate shuffle functioning as punpcklwd.
+ SmallVector<int, 16> ShuffleMask(VT.getVectorNumElements());
+ for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) {
+ ShuffleMask[2 * i] = i;
+ ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements();
+ }
+ SDValue ResLo =
+ DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
+ ResLo = DAG.getNode(ISD::BITCAST, DL, ResVT, ResLo);
+ // Generate shuffle functioning as punpckhwd.
+ for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) {
+ ShuffleMask[2 * i] = i + VT.getVectorNumElements() / 2;
+ ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements() * 3 / 2;
+ }
+ SDValue ResHi =
+ DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
+ ResHi = DAG.getNode(ISD::BITCAST, DL, ResVT, ResHi);
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
+ }
+ } else {
+ // When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
+ // to legalize the mul explicitly because implicit legalization for type
+ // <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
+ // instructions which will not exist when we explicitly legalize it by
+ // extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
+ // <4 x i16> undef).
+ //
+ // Legalize the operands of mul.
+ SmallVector<SDValue, 16> Ops(RegSize / ReducedVT.getSizeInBits(),
+ DAG.getUNDEF(ReducedVT));
+ Ops[0] = NewN0;
+ NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
+ Ops[0] = NewN1;
+ NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
+
+ if (Mode == MULU8 || Mode == MULS8) {
+ // Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower
+ // part is needed.
+ SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
+
+      // Convert the type of the mul result to VT.
+ MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
+ SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
+ : ISD::SIGN_EXTEND_VECTOR_INREG,
+ DL, ResVT, Mul);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
+ DAG.getIntPtrConstant(0, DL));
+ } else {
+ // Generate the lower and higher part of mul: pmulhw/pmulhuw. For
+ // MULU16/MULS16, both parts are needed.
+ SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
+ SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
+ OpsVT, NewN0, NewN1);
+
+ // Repack the lower part and higher part result of mul into a wider
+ // result. Make sure the type of mul result is VT.
+ MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
+ SDValue Res = DAG.getNode(X86ISD::UNPCKL, DL, OpsVT, MulLo, MulHi);
+ Res = DAG.getNode(ISD::BITCAST, DL, ResVT, Res);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
+ DAG.getIntPtrConstant(0, DL));
+ }
+ }
+}
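For instance (an illustrative IR-level sketch, not from the patch), in MULS8 mode:

//   %a = sext <8 x i8> %x to <8 x i32>
//   %b = sext <8 x i8> %y to <8 x i32>
//   %m = mul <8 x i32> %a, %b
// becomes a truncate of both operands to <8 x i16>, a single pmullw, and a
// sign extension of the 16-bit product back to <8 x i32>.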
+
+/// Optimize a single multiply with constant into two operations in order to
+/// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
+static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
+ EVT VT = N->getValueType(0);
+ if (DCI.isBeforeLegalize() && VT.isVector())
+ return reduceVMULWidth(N, DAG, Subtarget);
+
// An imul is usually smaller than the alternative sequence.
if (DAG.getMachineFunction().getFunction()->optForMinSize())
return SDValue();
@@ -25230,7 +27611,6 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
- EVT VT = N->getValueType(0);
if (VT != MVT::i64 && VT != MVT::i32)
return SDValue();
@@ -25307,7 +27687,7 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
@@ -25320,7 +27700,7 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
N0.getOperand(1).getOpcode() == ISD::Constant) {
SDValue N00 = N0.getOperand(0);
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
- APInt ShAmt = N1C->getAPIntValue();
+ const APInt &ShAmt = N1C->getAPIntValue();
Mask = Mask.shl(ShAmt);
bool MaskOK = false;
// We can handle cases concerning bit-widening nodes containing setcc_c if
@@ -25367,7 +27747,7 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-static SDValue PerformSRACombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineShiftRightAlgebraic(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
@@ -25424,11 +27804,11 @@ static SDValue PerformSRACombine(SDNode *N, SelectionDAG &DAG) {
/// shift by a constant amount which is known to be bigger than or equal
/// to the vector element size in bits.
static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
- (!Subtarget->hasInt256() ||
+ (!Subtarget.hasInt256() ||
(VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
return SDValue();
@@ -25436,7 +27816,7 @@ static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
SDLoc DL(N);
if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
- APInt ShiftAmt = AmtSplat->getAPIntValue();
+ const APInt &ShiftAmt = AmtSplat->getAPIntValue();
unsigned MaxAmount =
VT.getSimpleVT().getVectorElementType().getSizeInBits();
@@ -25451,16 +27831,15 @@ static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// PerformShiftCombine - Combine shifts.
-static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineShift(SDNode* N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
if (N->getOpcode() == ISD::SHL)
- if (SDValue V = PerformSHLCombine(N, DAG))
+ if (SDValue V = combineShiftLeft(N, DAG))
return V;
if (N->getOpcode() == ISD::SRA)
- if (SDValue V = PerformSRACombine(N, DAG))
+ if (SDValue V = combineShiftRightAlgebraic(N, DAG))
return V;
// Try to fold this logical shift into a zero vector.
@@ -25471,17 +27850,17 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
return SDValue();
}
-// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
-// where both setccs reference the same FP CMP, and rewrite for CMPEQSS
-// and friends. Likewise for OR -> CMPNEQSS.
-static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
+/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
+/// OR -> CMPNEQSS.
+static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
unsigned opcode;
// SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
// we're requiring SSE2 for both.
- if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
+ if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CMP0 = N0->getOperand(1);
@@ -25530,7 +27909,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
// FIXME: need symbolic constants for these magic numbers.
// See X86ATTInstPrinter.cpp:printSSECC().
unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
- if (Subtarget->hasAVX512()) {
+ if (Subtarget.hasAVX512()) {
SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
CMP01,
DAG.getConstant(x86cc, DL, MVT::i8));
@@ -25547,7 +27926,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
bool is64BitFP = (CMP00.getValueType() == MVT::f64);
MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
- if (is64BitFP && !Subtarget->is64Bit()) {
+ if (is64BitFP && !Subtarget.is64Bit()) {
// On a 32-bit target, we cannot bitcast the 64-bit float to a
// 64-bit integer, since that's not a legal type. Since
// OnesOrZeroesF is all ones or all zeroes, we don't need all the
@@ -25574,34 +27953,47 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector
-/// so it can be folded inside ANDNP.
-static bool CanFoldXORWithAllOnes(const SDNode *N) {
+/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
+static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
+ assert(N->getOpcode() == ISD::AND);
+
EVT VT = N->getValueType(0);
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDLoc DL(N);
- // Match direct AllOnes for 128 and 256-bit vectors
- if (ISD::isBuildVectorAllOnes(N))
- return true;
+ if (VT != MVT::v2i64 && VT != MVT::v4i64 &&
+ VT != MVT::v8i64 && VT != MVT::v16i32 &&
+ VT != MVT::v4i32 && VT != MVT::v8i32) // Legal with VLX
+ return SDValue();
- // Look through a bit convert.
- if (N->getOpcode() == ISD::BITCAST)
- N = N->getOperand(0).getNode();
-
- // Sometimes the operand may come from a insert_subvector building a 256-bit
- // allones vector
- if (VT.is256BitVector() &&
- N->getOpcode() == ISD::INSERT_SUBVECTOR) {
- SDValue V1 = N->getOperand(0);
- SDValue V2 = N->getOperand(1);
-
- if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
- V1.getOperand(0).getOpcode() == ISD::UNDEF &&
- ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
- ISD::isBuildVectorAllOnes(V2.getNode()))
- return true;
- }
+ // Canonicalize XOR to the left.
+ if (N1.getOpcode() == ISD::XOR)
+ std::swap(N0, N1);
- return false;
+ if (N0.getOpcode() != ISD::XOR)
+ return SDValue();
+
+ SDValue N00 = N0->getOperand(0);
+ SDValue N01 = N0->getOperand(1);
+
+ N01 = peekThroughBitcasts(N01);
+
+ // Either match a direct AllOnes for 128, 256, and 512-bit vectors, or an
+ // insert_subvector building a 256-bit AllOnes vector.
+ if (!ISD::isBuildVectorAllOnes(N01.getNode())) {
+ if (!VT.is256BitVector() || N01->getOpcode() != ISD::INSERT_SUBVECTOR)
+ return SDValue();
+
+ SDValue V1 = N01->getOperand(0);
+ SDValue V2 = N01->getOperand(1);
+ if (V1.getOpcode() != ISD::INSERT_SUBVECTOR ||
+ !V1.getOperand(0).isUndef() ||
+ !ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) ||
+ !ISD::isBuildVectorAllOnes(V2.getNode()))
+ return SDValue();
+ }
+ return DAG.getNode(X86ISD::ANDNP, DL, VT, N00, N1);
}
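The folded pattern, schematically (illustrative, not from the patch):

//   %t = xor <2 x i64> %x, <-1, -1>   ; vector NOT
//   %r = and <2 x i64> %t, %y
// becomes
//   %r = X86ISD::ANDNP %x, %y         ; (~x) & y in one PANDN/VPANDN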
// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
@@ -25610,7 +28002,7 @@ static bool CanFoldXORWithAllOnes(const SDNode *N) {
// some of the transition sequences.
static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (!VT.is256BitVector())
return SDValue();
@@ -25660,8 +28052,7 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
if (RHSConstSplat) {
N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getVectorElementType(),
SDValue(RHSConstSplat, 0));
- SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
- N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
+ N1 = DAG.getSplatBuildVector(WideVT, DL, N1);
} else if (RHSTrunc) {
N1 = N1->getOperand(0);
}
@@ -25687,9 +28078,9 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
}
}
-static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
+static SDValue combineVectorZext(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDLoc DL(N);
@@ -25705,8 +28096,7 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
// The other side of the AND should be a splat of 2^C, where C
// is the number of bits in the source type.
- if (N1.getOpcode() == ISD::BITCAST)
- N1 = N1.getOperand(0);
+ N1 = peekThroughBitcasts(N1);
if (N1.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
@@ -25715,10 +28105,11 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
EVT SrcType = Shuffle->getValueType(0);
// We expect a single-source shuffle
- if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
+ if (!Shuffle->getOperand(1)->isUndef())
return SDValue();
unsigned SrcSize = SrcType.getScalarSizeInBits();
+ unsigned NumElems = SrcType.getVectorNumElements();
APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
@@ -25742,7 +28133,7 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
// the source and dest type.
unsigned ZextRatio = ResSize / SrcSize;
bool IsZext = true;
- for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
+ for (unsigned i = 0; i != NumElems; ++i) {
if (i % ZextRatio) {
if (Shuffle->getMaskElt(i) > 0) {
// Expected undef
@@ -25765,8 +28156,7 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
// a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
// (instead of undef) where the k elements come from the zero vector.
SmallVector<int, 8> Mask;
- unsigned NumElems = SrcType.getVectorNumElements();
- for (unsigned i = 0; i < NumElems; ++i)
+ for (unsigned i = 0; i != NumElems; ++i)
if (i % ZextRatio)
Mask.push_back(NumElems);
else
@@ -25781,7 +28171,7 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
/// types, try to convert this into a floating point logic node to avoid
/// unnecessary moves from SSE to integer registers.
static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
unsigned FPOpcode = ISD::DELETED_NODE;
if (N->getOpcode() == ISD::AND)
FPOpcode = X86ISD::FAND;
@@ -25798,8 +28188,8 @@ static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
SDValue N1 = N->getOperand(1);
SDLoc DL(N);
if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
- ((Subtarget->hasSSE1() && VT == MVT::i32) ||
- (Subtarget->hasSSE2() && VT == MVT::i64))) {
+ ((Subtarget.hasSSE1() && VT == MVT::i32) ||
+ (Subtarget.hasSSE2() && VT == MVT::i64))) {
SDValue N00 = N0.getOperand(0);
SDValue N10 = N1.getOperand(0);
EVT N00Type = N00.getValueType();
@@ -25812,21 +28202,63 @@ static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
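The shape being matched, schematically (illustrative, not from the patch):

//   (and (bitcast f32 %a to i32), (bitcast f32 %b to i32))
//     -> (bitcast (X86ISD::FAND %a, %b) to i32)
// so the value stays in an XMM register instead of round-tripping through
// a GPR just to perform the integer logic op.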
-static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// If this is a PCMPEQ or PCMPGT result that is bitwise-anded with 1 (this is
+/// the x86 lowering of a SETCC + ZEXT), replace the 'and' with a shift-right to
+/// eliminate loading the vector constant mask value. This relies on the fact
+/// that a PCMP always creates an all-ones or all-zeros bitmask per element.
+static SDValue combinePCMPAnd1(SDNode *N, SelectionDAG &DAG) {
+ SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
+ SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
+
+ // TODO: Use AssertSext to mark any nodes that have the property of producing
+ // all-ones or all-zeros. Then check for that node rather than particular
+ // opcodes.
+ if (Op0.getOpcode() != X86ISD::PCMPEQ && Op0.getOpcode() != X86ISD::PCMPGT)
+ return SDValue();
+
+ // The existence of the PCMP node guarantees that we have the required SSE2 or
+ // AVX2 for a shift of this vector type, but there is no vector shift by
+ // immediate for a vector with byte elements (PSRLB). 512-bit vectors use the
+ // masked compare nodes, so they should not make it here.
+ EVT VT0 = Op0.getValueType();
+ EVT VT1 = Op1.getValueType();
+ unsigned EltBitWidth = VT0.getScalarType().getSizeInBits();
+ if (VT0 != VT1 || EltBitWidth == 8)
+ return SDValue();
+
+ assert(VT0.getSizeInBits() == 128 || VT0.getSizeInBits() == 256);
+
+ APInt SplatVal;
+ if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) || SplatVal != 1)
+ return SDValue();
+
+ SDLoc DL(N);
+ SDValue ShAmt = DAG.getConstant(EltBitWidth - 1, DL, MVT::i8);
+ SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
+ return DAG.getBitcast(N->getValueType(0), Shift);
+}
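Concretely (an illustrative sketch, not from the patch), for v4i32:

//   %c = X86ISD::PCMPGT %a, %b        ; each lane is 0 or 0xFFFFFFFF
//   %r = and %c, <1, 1, 1, 1>
// becomes
//   %r = X86ISD::VSRLI %c, 31         ; logical shift by EltBitWidth - 1
// which yields the same 0/1 lanes without loading a constant mask.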
+
+static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
- if (SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget))
+ if (SDValue Zext = combineVectorZext(N, DAG, DCI, Subtarget))
return Zext;
- if (SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget))
+ if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
return R;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
+ if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
+ return R;
+
+ if (SDValue ShiftRight = combinePCMPAnd1(N, DAG))
+ return ShiftRight;
+
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -25834,143 +28266,176 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
// Create BEXTR instructions
// BEXTR is ((X >> imm) & (2**size-1))
- if (VT == MVT::i32 || VT == MVT::i64) {
- // Check for BEXTR.
- if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
- (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
- ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
- ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
- if (MaskNode && ShiftNode) {
- uint64_t Mask = MaskNode->getZExtValue();
- uint64_t Shift = ShiftNode->getZExtValue();
- if (isMask_64(Mask)) {
- uint64_t MaskSize = countPopulation(Mask);
- if (Shift + MaskSize <= VT.getSizeInBits())
- return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
- DAG.getConstant(Shift | (MaskSize << 8), DL,
- VT));
- }
- }
- } // BEXTR
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return SDValue();
+ if (!Subtarget.hasBMI() && !Subtarget.hasTBM())
return SDValue();
+ if (N0.getOpcode() != ISD::SRA && N0.getOpcode() != ISD::SRL)
+ return SDValue();
+
+ ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
+ ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ if (MaskNode && ShiftNode) {
+ uint64_t Mask = MaskNode->getZExtValue();
+ uint64_t Shift = ShiftNode->getZExtValue();
+ if (isMask_64(Mask)) {
+ uint64_t MaskSize = countPopulation(Mask);
+ if (Shift + MaskSize <= VT.getSizeInBits())
+ return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
+ DAG.getConstant(Shift | (MaskSize << 8), DL,
+ VT));
+ }
}
+ return SDValue();
+}
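A worked control-word example for the BEXTR fold above (illustrative values, not from the patch):

//   (and (srl %x, 4), 0xFFF)          ; Shift = 4, MaskSize = 12
//     -> X86ISD::BEXTR %x, (12 << 8) | 4
//   BEXTR's control word holds the start bit in bits [7:0] and the length
//   in bits [15:8], matching Shift | (MaskSize << 8).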
- // Want to form ANDNP nodes:
- // 1) In the hopes of then easily combining them with OR and AND nodes
- // to form PBLEND/PSIGN.
- // 2) To match ANDN packed intrinsics
- if (VT != MVT::v2i64 && VT != MVT::v4i64)
+// Try to fold:
+//   (or (and m, y), (pandn m, x))
+// into:
+// (vselect m, x, y)
+// As a special case, try to fold:
+//   (or (and m, (sub 0, x)), (pandn m, x))
+// into:
+// (sub (xor X, M), M)
+static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ assert(N->getOpcode() == ISD::OR);
+
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N->getValueType(0);
+
+ if (!((VT == MVT::v2i64) || (VT == MVT::v4i64 && Subtarget.hasInt256())))
+ return SDValue();
+ assert(Subtarget.hasSSE2() && "Unexpected i64 vector without SSE2!");
+
+ // Canonicalize pandn to RHS
+ if (N0.getOpcode() == X86ISD::ANDNP)
+ std::swap(N0, N1);
+
+ if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
return SDValue();
- // Check LHS for vnot
- if (N0.getOpcode() == ISD::XOR &&
- //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
- CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
- return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
+ SDValue Mask = N1.getOperand(0);
+ SDValue X = N1.getOperand(1);
+ SDValue Y;
+ if (N0.getOperand(0) == Mask)
+ Y = N0.getOperand(1);
+ if (N0.getOperand(1) == Mask)
+ Y = N0.getOperand(0);
- // Check RHS for vnot
- if (N1.getOpcode() == ISD::XOR &&
- //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
- CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
- return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
+ // Check to see if the mask appeared in both the AND and ANDNP.
+ if (!Y.getNode())
+ return SDValue();
- return SDValue();
+ // Validate that X, Y, and Mask are bitcasts, and see through them.
+ Mask = peekThroughBitcasts(Mask);
+ X = peekThroughBitcasts(X);
+ Y = peekThroughBitcasts(Y);
+
+ EVT MaskVT = Mask.getValueType();
+
+ // Validate that the Mask operand is a vector sra node.
+ // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
+ // there is no psrai.b
+ unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
+ unsigned SraAmt = ~0;
+ if (Mask.getOpcode() == ISD::SRA) {
+ if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
+ if (auto *AmtConst = AmtBV->getConstantSplatNode())
+ SraAmt = AmtConst->getZExtValue();
+ } else if (Mask.getOpcode() == X86ISD::VSRAI) {
+ SDValue SraC = Mask.getOperand(1);
+ SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
+ }
+ if ((SraAmt + 1) != EltBits)
+ return SDValue();
+
+ SDLoc DL(N);
+
+ // Try to match:
+ // (or (and (M, (sub 0, X)), (pandn M, X)))
+ // which is a special case of vselect:
+ // (vselect M, (sub 0, X), X)
+ // Per:
+ // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
+ // We know that, if fNegate is 0 or 1:
+ // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
+ //
+ // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
+ // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
+ // ( M ? -X : X) == ((X ^ M ) + (M & 1))
+ // This lets us transform our vselect to:
+ // (add (xor X, M), (and M, 1))
+ // And further to:
+ // (sub (xor X, M), M)
+ if (X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
+ auto IsNegV = [](SDNode *N, SDValue V) {
+ return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
+ ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
+ };
+ SDValue V;
+ if (IsNegV(Y.getNode(), X))
+ V = X;
+ else if (IsNegV(X.getNode(), Y))
+ V = Y;
+
+ if (V) {
+ assert(EltBits == 8 || EltBits == 16 || EltBits == 32);
+ SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
+ SDValue SubOp2 = Mask;
+
+ // If the negate was on the false side of the select, then
+ // the operands of the SUB need to be swapped. PR 27251.
+ // This is because the pattern being matched above is
+ // (vselect M, (sub (0, X), X) -> (sub (xor X, M), M)
+ // but if the pattern matched was
+ // (vselect M, X, (sub (0, X))), that is really negation of the pattern
+ // above, -(vselect M, (sub 0, X), X), and therefore the replacement
+ // pattern also needs to be a negation of the replacement pattern above.
+ // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
+ // sub accomplishes the negation of the replacement pattern.
+ if (V == Y)
+ std::swap(SubOp1, SubOp2);
+
+ return DAG.getBitcast(VT,
+ DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2));
+ }
+ }
+
+ // PBLENDVB is only available on SSE 4.1.
+ if (!Subtarget.hasSSE41())
+ return SDValue();
+
+ MVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
+
+ X = DAG.getBitcast(BlendVT, X);
+ Y = DAG.getBitcast(BlendVT, Y);
+ Mask = DAG.getBitcast(BlendVT, Mask);
+ Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
+ return DAG.getBitcast(VT, Mask);
}
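// Standalone check (illustrative C++, assuming M is 0 or all-ones as the
// derivation above requires) of the conditional-negate identity being used:
#include <cassert>
#include <cstdint>

static int32_t condNegate(int32_t X, int32_t M) {
  return (X ^ M) - M; // M == 0 -> X; M == -1 -> ~X + 1 == -X
}

int main() {
  assert(condNegate(42, 0) == 42);
  assert(condNegate(42, -1) == -42);
  assert(condNegate(-7, -1) == 7);
}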
-static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
- if (SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget))
+ if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
return R;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
+ if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
+ return R;
+
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
- // look for psign/blend
- if (VT == MVT::v2i64 || VT == MVT::v4i64) {
- if (!Subtarget->hasSSSE3() ||
- (VT == MVT::v4i64 && !Subtarget->hasInt256()))
- return SDValue();
-
- // Canonicalize pandn to RHS
- if (N0.getOpcode() == X86ISD::ANDNP)
- std::swap(N0, N1);
- // or (and (m, y), (pandn m, x))
- if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
- SDValue Mask = N1.getOperand(0);
- SDValue X = N1.getOperand(1);
- SDValue Y;
- if (N0.getOperand(0) == Mask)
- Y = N0.getOperand(1);
- if (N0.getOperand(1) == Mask)
- Y = N0.getOperand(0);
-
- // Check to see if the mask appeared in both the AND and ANDNP and
- if (!Y.getNode())
- return SDValue();
-
- // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
- // Look through mask bitcast.
- if (Mask.getOpcode() == ISD::BITCAST)
- Mask = Mask.getOperand(0);
- if (X.getOpcode() == ISD::BITCAST)
- X = X.getOperand(0);
- if (Y.getOpcode() == ISD::BITCAST)
- Y = Y.getOperand(0);
-
- EVT MaskVT = Mask.getValueType();
-
- // Validate that the Mask operand is a vector sra node.
- // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
- // there is no psrai.b
- unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
- unsigned SraAmt = ~0;
- if (Mask.getOpcode() == ISD::SRA) {
- if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
- if (auto *AmtConst = AmtBV->getConstantSplatNode())
- SraAmt = AmtConst->getZExtValue();
- } else if (Mask.getOpcode() == X86ISD::VSRAI) {
- SDValue SraC = Mask.getOperand(1);
- SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
- }
- if ((SraAmt + 1) != EltBits)
- return SDValue();
-
- SDLoc DL(N);
-
-      // Now we know we at least have a pblendvb with the mask val.  See if
- // we can form a psignb/w/d.
- // psign = x.type == y.type == mask.type && y = sub(0, x);
- if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
- ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
- X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
- assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
- "Unsupported VT for PSIGN");
- Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
- return DAG.getBitcast(VT, Mask);
- }
- // PBLENDVB only available on SSE 4.1
- if (!Subtarget->hasSSE41())
- return SDValue();
-
- MVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
-
- X = DAG.getBitcast(BlendVT, X);
- Y = DAG.getBitcast(BlendVT, Y);
- Mask = DAG.getBitcast(BlendVT, Mask);
- Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
- return DAG.getBitcast(VT, Mask);
- }
- }
-
if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
return SDValue();
@@ -25982,7 +28447,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
// series of shifts/or that would otherwise be generated.
// Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
// have higher latencies and we are not optimizing for size.
- if (!OptForSize && Subtarget->isSHLDSlow())
+ if (!OptForSize && Subtarget.isSHLDSlow())
return SDValue();
if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
@@ -26040,7 +28505,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
}
// Generate NEG and CMOV for integer abs.
-static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineIntegerAbs(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
// Since X86 does not have CMOV for 8-bit integer, we don't convert
@@ -26073,13 +28538,14 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-// Try to turn tests against the signbit in the form of:
-// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
-// into:
-// SETGT(X, -1)
+/// Try to turn tests against the signbit in the form of:
+/// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
+/// into:
+/// SETGT(X, -1)
static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
- // This is only worth doing if the output type is i8.
- if (N->getValueType(0) != MVT::i8)
+ // This is only worth doing if the output type is i8 or i1.
+ EVT ResultType = N->getValueType(0);
+ if (ResultType != MVT::i8 && ResultType != MVT::i1)
return SDValue();
SDValue N0 = N->getOperand(0);
@@ -26114,22 +28580,78 @@ static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
SDValue ShiftOp = Shift.getOperand(0);
EVT ShiftOpTy = ShiftOp.getValueType();
- SDValue Cond = DAG.getSetCC(DL, MVT::i8, ShiftOp,
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
+ *DAG.getContext(), ResultType);
+ SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
+ if (SetCCResultType != ResultType)
+ Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
return Cond;
}
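// Scalar spot-check (illustrative, i32 case) of the fold above: shifting the
// sign bit down logically and xor'ing with 1 yields 1 exactly when X > -1.
#include <cassert>
#include <cstdint>

int main() {
  for (int32_t X : {INT32_MIN, -2, -1, 0, 1, INT32_MAX}) {
    uint32_t XorTruncSrl = (uint32_t(X) >> 31) ^ 1u;
    assert(XorTruncSrl == uint32_t(X > -1));
  }
}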
-static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
+/// Turn vector tests of the signbit in the form of:
+/// xor (sra X, elt_size(X)-1), -1
+/// into:
+/// pcmpgt X, -1
+///
+/// This should be called before type legalization because the pattern may not
+/// persist after that.
+static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ EVT VT = N->getValueType(0);
+ if (!VT.isSimple())
+ return SDValue();
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: return SDValue();
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
+ case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
+ case MVT::v32i8:
+ case MVT::v16i16:
+ case MVT::v8i32:
+ case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
+ }
+
+ // There must be a shift right algebraic before the xor, and the xor must be a
+ // 'not' operation.
+ SDValue Shift = N->getOperand(0);
+ SDValue Ones = N->getOperand(1);
+ if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
+ !ISD::isBuildVectorAllOnes(Ones.getNode()))
+ return SDValue();
+
+ // The shift should be smearing the sign bit across each vector element.
+ auto *ShiftBV = dyn_cast<BuildVectorSDNode>(Shift.getOperand(1));
+ if (!ShiftBV)
+ return SDValue();
+
+ EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
+ auto *ShiftAmt = ShiftBV->getConstantSplatNode();
+ if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
+ return SDValue();
+
+ // Create a greater-than comparison against -1. We don't use the more obvious
+ // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
+ return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
+}
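// Scalar analog (illustrative; assumes the usual arithmetic right shift of
// negative values) of the vector fold above: shifting by eltbits-1 smears the
// sign bit into 0 or -1, so the 'not' of it is all-ones exactly when X > -1,
// the same lanewise answer PCMPGT produces.
#include <cassert>
#include <cstdint>

int main() {
  for (int32_t X : {-5, -1, 0, 7}) {
    int32_t Smeared = X >> 31; // sign smear
    assert(~Smeared == (X > -1 ? -1 : 0));
  }
}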
+
+static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
+ if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
+ return Cmp;
+
if (DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
return RV;
- if (Subtarget->hasCMov())
- if (SDValue RV = performIntegerAbsCombine(N, DAG))
+ if (Subtarget.hasCMov())
+ if (SDValue RV = combineIntegerAbs(N, DAG))
return RV;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
@@ -26142,7 +28664,8 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
/// which is c = (a + b + 1) / 2, and replace this operation with the efficient
/// X86ISD::AVG instruction.
static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
- const X86Subtarget *Subtarget, SDLoc DL) {
+ const X86Subtarget &Subtarget,
+ const SDLoc &DL) {
if (!VT.isVector() || !VT.isSimple())
return SDValue();
EVT InVT = In.getValueType();
@@ -26159,10 +28682,12 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
return SDValue();
- if (Subtarget->hasAVX512()) {
+ if (!Subtarget.hasSSE2())
+ return SDValue();
+ if (Subtarget.hasAVX512()) {
if (VT.getSizeInBits() > 512)
return SDValue();
- } else if (Subtarget->hasAVX2()) {
+ } else if (Subtarget.hasAVX2()) {
if (VT.getSizeInBits() > 256)
return SDValue();
} else {
@@ -26221,10 +28746,8 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
Operands[0].getOperand(0).getValueType() == VT) {
// The pattern is detected. Subtract one from the constant vector, then
     // demote it and emit the X86ISD::AVG instruction.
- SDValue One = DAG.getConstant(1, DL, InScalarVT);
- SDValue Ones = DAG.getNode(ISD::BUILD_VECTOR, DL, InVT,
- SmallVector<SDValue, 8>(NumElems, One));
- Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], Ones);
+ SDValue VecOnes = DAG.getConstant(1, DL, InVT);
+ Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0),
Operands[1]);
@@ -26258,10 +28781,9 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
return SDValue();
}
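// Illustrative scalar sketch (uint8_t lanes, standalone) of the rounding
// average this detection targets, including the subtract-one trick applied to
// the constant operand above: pavgb(a, b) == (a + b + 1) >> 1 in a widened
// type, so (a + C) >> 1 == pavgb(a, C - 1) for any constant C >= 1.
#include <cassert>
#include <cstdint>

static uint8_t pavgb(uint8_t A, uint8_t B) {
  return uint8_t((uint16_t(A) + uint16_t(B) + 1) >> 1); // no u16 overflow
}

int main() {
  const unsigned C = 77; // hypothetical constant operand
  for (unsigned A = 0; A != 256; ++A)
    assert(uint8_t((A + C) >> 1) == pavgb(uint8_t(A), uint8_t(C - 1)));
}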
-/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
-static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
LoadSDNode *Ld = cast<LoadSDNode>(N);
EVT RegVT = Ld->getValueType(0);
EVT MemVT = Ld->getMemoryVT();
@@ -26283,41 +28805,180 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
SDValue Ptr = Ld->getBasePtr();
- SDValue Increment =
- DAG.getConstant(16, dl, TLI.getPointerTy(DAG.getDataLayout()));
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
NumElems/2);
- SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
- Ld->getPointerInfo(), Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->isInvariant(),
- Alignment);
- Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
- SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
- Ld->getPointerInfo(), Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->isInvariant(),
- std::min(16U, Alignment));
+ SDValue Load1 =
+ DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
+ Alignment, Ld->getMemOperand()->getFlags());
+
+ Ptr = DAG.getMemBasePlusOffset(Ptr, 16, dl);
+ SDValue Load2 =
+ DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
+ std::min(16U, Alignment), Ld->getMemOperand()->getFlags());
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Load1.getValue(1),
Load2.getValue(1));
SDValue NewVec = DAG.getUNDEF(RegVT);
- NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
- NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
+ NewVec = insert128BitVector(NewVec, Load1, 0, DAG, dl);
+ NewVec = insert128BitVector(NewVec, Load2, NumElems / 2, DAG, dl);
return DCI.CombineTo(N, NewVec, TF, true);
}
return SDValue();
}
-/// PerformMLOADCombine - Resolve extending loads
-static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// If V is a build vector of boolean constants and exactly one of those
+/// constants is true, return the operand index of that true element.
+/// Otherwise, return -1.
+static int getOneTrueElt(SDValue V) {
+ // This needs to be a build vector of booleans.
+ // TODO: Checking for the i1 type matches the IR definition for the mask,
+ // but the mask check could be loosened to i8 or other types. That might
+  // also require checking more than 'allOnesValue'; e.g., the x86 HW
+ // instructions only require that the MSB is set for each mask element.
+ // The ISD::MSTORE comments/definition do not specify how the mask operand
+ // is formatted.
+ auto *BV = dyn_cast<BuildVectorSDNode>(V);
+ if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
+ return -1;
+
+ int TrueIndex = -1;
+ unsigned NumElts = BV->getValueType(0).getVectorNumElements();
+ for (unsigned i = 0; i < NumElts; ++i) {
+ const SDValue &Op = BV->getOperand(i);
+ if (Op.isUndef())
+ continue;
+ auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
+ if (!ConstNode)
+ return -1;
+ if (ConstNode->getAPIntValue().isAllOnesValue()) {
+ // If we already found a one, this is too many.
+ if (TrueIndex >= 0)
+ return -1;
+ TrueIndex = i;
+ }
+ }
+ return TrueIndex;
+}
+
+/// Given a masked memory load/store operation, return true if it has one mask
+/// bit set. If it has one mask bit set, then also return the memory address of
+/// the scalar element to load/store, the vector index to insert/extract that
+/// scalar element, and the alignment for the scalar memory access.
+static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
+ SelectionDAG &DAG, SDValue &Addr,
+ SDValue &Index, unsigned &Alignment) {
+ int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
+ if (TrueMaskElt < 0)
+ return false;
+
+ // Get the address of the one scalar element that is specified by the mask
+ // using the appropriate offset from the base pointer.
+ EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
+ Addr = MaskedOp->getBasePtr();
+ if (TrueMaskElt != 0) {
+ unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
+ Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
+ }
+
+ Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
+ Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
+ return true;
+}
+
+/// If exactly one element of the mask is set for a non-extending masked load,
+/// it is a scalar load and vector insert.
+/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
+/// mask have already been optimized in IR, so we don't bother with those here.
+static SDValue
+reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
+ // However, some target hooks may need to be added to know when the transform
+ // is profitable. Endianness would also have to be considered.
+
+ SDValue Addr, VecIndex;
+ unsigned Alignment;
+ if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
+ return SDValue();
+
+ // Load the one scalar element that is specified by the mask using the
+ // appropriate offset from the base pointer.
+ SDLoc DL(ML);
+ EVT VT = ML->getValueType(0);
+ EVT EltVT = VT.getVectorElementType();
+ SDValue Load =
+ DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
+ Alignment, ML->getMemOperand()->getFlags());
+
+ // Insert the loaded element into the appropriate place in the vector.
+ SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, ML->getSrc0(),
+ Load, VecIndex);
+ return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
+}
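// Plain C++ sketch (illustrative; helper name is hypothetical) of the shape
// this combine produces: a one-hot masked load of four floats becomes one
// scalar load inserted into the pass-through vector.
#include <array>
#include <cstddef>

static std::array<float, 4> oneHotMaskedLoad(const float *Base,
                                             std::size_t TrueIdx,
                                             std::array<float, 4> PassThru) {
  PassThru[TrueIdx] = Base[TrueIdx]; // scalar load + INSERT_VECTOR_ELT
  return PassThru;
}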
+
+static SDValue
+combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
+ return SDValue();
+
+ SDLoc DL(ML);
+ EVT VT = ML->getValueType(0);
+
+ // If we are loading the first and last elements of a vector, it is safe and
+ // always faster to load the whole vector. Replace the masked load with a
+ // vector load and select.
+ unsigned NumElts = VT.getVectorNumElements();
+ BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
+ bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
+ bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
+ if (LoadFirstElt && LoadLastElt) {
+ SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
+ ML->getMemOperand());
+ SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd, ML->getSrc0());
+ return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
+ }
+
+ // Convert a masked load with a constant mask into a masked load and a select.
+ // This allows the select operation to use a faster kind of select instruction
+ // (for example, vblendvps -> vblendps).
+
+ // Don't try this if the pass-through operand is already undefined. That would
+ // cause an infinite loop because that's what we're about to create.
+ if (ML->getSrc0().isUndef())
+ return SDValue();
+
+ // The new masked load has an undef pass-through operand. The select uses the
+ // original pass-through operand.
+ SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
+ ML->getMask(), DAG.getUNDEF(VT),
+ ML->getMemoryVT(), ML->getMemOperand(),
+ ML->getExtensionType());
+ SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML, ML->getSrc0());
+
+ return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
+}
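// The constant-mask rewrite above, modeled lanewise in standalone C++
// (illustrative): a plain vector load (or a masked load with undef
// pass-through) followed by a select, which can lower to a cheaper immediate
// blend such as vblendps instead of vblendvps.
#include <array>

static std::array<float, 4> loadThenSelect(const std::array<float, 4> &Loaded,
                                           const std::array<bool, 4> &Mask,
                                           std::array<float, 4> PassThru) {
  for (int i = 0; i != 4; ++i) // the per-lane semantics of the select
    if (Mask[i])
      PassThru[i] = Loaded[i];
  return PassThru;
}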
+
+static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
+ if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
+ if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
+ return ScalarLoad;
+ // TODO: Do some AVX512 subsets benefit from this transform?
+ if (!Subtarget.hasAVX512())
+ if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
+ return Blend;
+ }
+
if (Mld->getExtensionType() != ISD::SEXTLOAD)
return SDValue();
+ // Resolve extending loads.
EVT VT = Mld->getValueType(0);
unsigned NumElems = VT.getVectorNumElements();
EVT LdVT = Mld->getMemoryVT();
@@ -26326,21 +28987,21 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
assert(LdVT != VT && "Cannot extend to the same type");
unsigned ToSz = VT.getVectorElementType().getSizeInBits();
unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
- // From, To sizes and ElemCount must be pow of two
+ // From/To sizes and ElemCount must be pow of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for extending masked load");
unsigned SizeRatio = ToSz / FromSz;
assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
- // Create a type on which we perform the shuffle
+ // Create a type on which we perform the shuffle.
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
LdVT.getScalarType(), NumElems*SizeRatio);
assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
- // Convert Src0 value
+ // Convert Src0 value.
SDValue WideSrc0 = DAG.getBitcast(WideVecVT, Mld->getSrc0());
- if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
+ if (!Mld->getSrc0().isUndef()) {
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
@@ -26349,13 +29010,13 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
"WideVecVT should be legal");
WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
- DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
+ DAG.getUNDEF(WideVecVT), ShuffleVec);
}
- // Prepare the new mask
+ // Prepare the new mask.
SDValue NewMask;
SDValue Mask = Mld->getMask();
if (Mask.getValueType() == VT) {
- // Mask and original value have the same type
+ // Mask and original value have the same type.
NewMask = DAG.getBitcast(WideVecVT, Mask);
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
@@ -26364,9 +29025,8 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
ShuffleVec[i] = NumElems * SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
DAG.getConstant(0, dl, WideVecVT),
- &ShuffleVec[0]);
- }
- else {
+ ShuffleVec);
+ } else {
assert(Mask.getValueType().getVectorElementType() == MVT::i1);
unsigned WidenNumElts = NumElems*SizeRatio;
unsigned MaskNumElts = VT.getVectorNumElements();
@@ -26390,13 +29050,41 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
}
-/// PerformMSTORECombine - Resolve truncating stores
-static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+
+/// If exactly one element of the mask is set for a non-truncating masked store,
+/// it is a vector extract and scalar store.
+/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
+/// mask have already been optimized in IR, so we don't bother with those here.
+static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
+ SelectionDAG &DAG) {
+ // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
+ // However, some target hooks may need to be added to know when the transform
+ // is profitable. Endianness would also have to be considered.
+
+ SDValue Addr, VecIndex;
+ unsigned Alignment;
+ if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
+ return SDValue();
+
+ // Extract the one scalar element that is actually being stored.
+ SDLoc DL(MS);
+ EVT VT = MS->getValue().getValueType();
+ EVT EltVT = VT.getVectorElementType();
+ SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
+ MS->getValue(), VecIndex);
+
+ // Store that element at the appropriate offset from the base pointer.
+ return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
+ Alignment, MS->getMemOperand()->getFlags());
+}
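// Mirror of the masked-load case, sketched standalone (illustrative): with a
// one-hot mask the masked store collapses to extract-element + scalar store.
#include <cstddef>

static void oneHotMaskedStore(float *Base, std::size_t TrueIdx,
                              const float (&V)[4]) {
  Base[TrueIdx] = V[TrueIdx]; // EXTRACT_VECTOR_ELT + plain store
}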
+
+static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
if (!Mst->isTruncatingStore())
- return SDValue();
+ return reduceMaskedStoreToScalarStore(Mst, DAG);
+ // Resolve truncating stores.
EVT VT = Mst->getValue().getValueType();
unsigned NumElems = VT.getVectorNumElements();
EVT StVT = Mst->getMemoryVT();
@@ -26415,7 +29103,7 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
if (TLI.isTruncStoreLegal(VT, StVT))
return SDValue();
- // From, To sizes and ElemCount must be pow of two
+ // From/To sizes and ElemCount must be pow of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for truncating masked store");
// We are going to use the original vector elt for storing.
@@ -26426,7 +29114,7 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
unsigned SizeRatio = FromSz / ToSz;
assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
- // Create a type on which we perform the shuffle
+ // Create a type on which we perform the shuffle.
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
StVT.getScalarType(), NumElems*SizeRatio);
@@ -26443,12 +29131,12 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
DAG.getUNDEF(WideVecVT),
- &ShuffleVec[0]);
+ ShuffleVec);
SDValue NewMask;
SDValue Mask = Mst->getMask();
if (Mask.getValueType() == VT) {
- // Mask and original value have the same type
+ // Mask and original value have the same type.
NewMask = DAG.getBitcast(WideVecVT, Mask);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
@@ -26456,9 +29144,8 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
ShuffleVec[i] = NumElems*SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
DAG.getConstant(0, dl, WideVecVT),
- &ShuffleVec[0]);
- }
- else {
+ ShuffleVec);
+ } else {
assert(Mask.getValueType().getVectorElementType() == MVT::i1);
unsigned WidenNumElts = NumElems*SizeRatio;
unsigned MaskNumElts = VT.getVectorNumElements();
@@ -26479,9 +29166,9 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
Mst->getBasePtr(), NewMask, StVT,
Mst->getMemOperand(), false);
}
-/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
-static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+
+static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
StoreSDNode *St = cast<StoreSDNode>(N);
EVT VT = St->getValue().getValueType();
EVT StVT = St->getMemoryVT();
@@ -26496,26 +29183,24 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
unsigned Alignment = St->getAlignment();
if (VT.is256BitVector() && StVT == VT &&
TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- AddressSpace, Alignment, &Fast) && !Fast) {
+ AddressSpace, Alignment, &Fast) &&
+ !Fast) {
unsigned NumElems = VT.getVectorNumElements();
if (NumElems < 2)
return SDValue();
- SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
- SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
+ SDValue Value0 = extract128BitVector(StoredVal, 0, DAG, dl);
+ SDValue Value1 = extract128BitVector(StoredVal, NumElems / 2, DAG, dl);
- SDValue Stride =
- DAG.getConstant(16, dl, TLI.getPointerTy(DAG.getDataLayout()));
SDValue Ptr0 = St->getBasePtr();
- SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
-
- SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), Alignment);
- SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(),
- std::min(16U, Alignment));
+ SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 16, dl);
+
+ SDValue Ch0 =
+ DAG.getStore(St->getChain(), dl, Value0, Ptr0, St->getPointerInfo(),
+ Alignment, St->getMemOperand()->getFlags());
+ SDValue Ch1 =
+ DAG.getStore(St->getChain(), dl, Value1, Ptr1, St->getPointerInfo(),
+ std::min(16U, Alignment), St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
}
@@ -26526,12 +29211,11 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// Check if we can detect an AVG pattern from the truncation. If yes,
// replace the trunc store by a normal store with the result of X86ISD::AVG
// instruction.
- SDValue Avg =
- detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG, Subtarget, dl);
- if (Avg.getNode())
+ if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
+ Subtarget, dl))
return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
+ St->getPointerInfo(), St->getAlignment(),
+ St->getMemOperand()->getFlags());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned NumElems = VT.getVectorNumElements();
@@ -26543,7 +29227,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
// are designated for truncate store.
// In this case we don't need any further transformations.
- if (TLI.isTruncStoreLegal(VT, StVT))
+ if (TLI.isTruncStoreLegalOrCustom(VT, StVT))
return SDValue();
// From, To sizes and ElemCount must be pow of two
@@ -26573,7 +29257,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
DAG.getUNDEF(WideVecVT),
- &ShuffleVec[0]);
+ ShuffleVec);
// At this point all of the data is stored at the bottom of the
// register. We now need to save it to mem.
@@ -26595,8 +29279,6 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
SmallVector<SDValue, 8> Chains;
- SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, dl,
- TLI.getPointerTy(DAG.getDataLayout()));
SDValue Ptr = St->getBasePtr();
// Perform one or more big stores into memory.
@@ -26604,10 +29286,10 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
StoreType, ShuffWide,
DAG.getIntPtrConstant(i, dl));
- SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
- Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
+ SDValue Ch =
+ DAG.getStore(St->getChain(), dl, SubVec, Ptr, St->getPointerInfo(),
+ St->getAlignment(), St->getMemOperand()->getFlags());
+ Ptr = DAG.getMemBasePlusOffset(Ptr, StoreType.getStoreSize(), dl);
Chains.push_back(Ch);
}
@@ -26626,9 +29308,9 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
const Function *F = DAG.getMachineFunction().getFunction();
bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
bool F64IsLegal =
- !Subtarget->useSoftFloat() && !NoImplicitFloatOps && Subtarget->hasSSE2();
+ !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
if ((VT.isVector() ||
- (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
+ (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
isa<LoadSDNode>(St->getValue()) &&
!cast<LoadSDNode>(St->getValue())->isVolatile() &&
St->getChain().hasOneUse() && !St->isVolatile()) {
@@ -26667,58 +29349,49 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// If we are a 64-bit capable x86, lower to a single movq load/store pair.
// Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
// pair instead.
- if (Subtarget->is64Bit() || F64IsLegal) {
- MVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
+ if (Subtarget.is64Bit() || F64IsLegal) {
+ MVT LdVT = Subtarget.is64Bit() ? MVT::i64 : MVT::f64;
SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
- Ld->getPointerInfo(), Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->isInvariant(),
- Ld->getAlignment());
+ Ld->getPointerInfo(), Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags());
SDValue NewChain = NewLd.getValue(1);
- if (TokenFactorIndex != -1) {
+ if (TokenFactorIndex >= 0) {
Ops.push_back(NewChain);
NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
}
return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
- St->getPointerInfo(),
- St->isVolatile(), St->isNonTemporal(),
- St->getAlignment());
+ St->getPointerInfo(), St->getAlignment(),
+ St->getMemOperand()->getFlags());
}
// Otherwise, lower to two pairs of 32-bit loads / stores.
SDValue LoAddr = Ld->getBasePtr();
- SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
- DAG.getConstant(4, LdDL, MVT::i32));
+ SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);
SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
- Ld->getPointerInfo(),
- Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(), Ld->getAlignment());
+ Ld->getPointerInfo(), Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags());
SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
Ld->getPointerInfo().getWithOffset(4),
- Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(),
- MinAlign(Ld->getAlignment(), 4));
+ MinAlign(Ld->getAlignment(), 4),
+ Ld->getMemOperand()->getFlags());
SDValue NewChain = LoLd.getValue(1);
- if (TokenFactorIndex != -1) {
+ if (TokenFactorIndex >= 0) {
Ops.push_back(LoLd);
Ops.push_back(HiLd);
NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
}
LoAddr = St->getBasePtr();
- HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
- DAG.getConstant(4, StDL, MVT::i32));
-
- SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
- St->getPointerInfo(),
- St->isVolatile(), St->isNonTemporal(),
- St->getAlignment());
- SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
- St->getPointerInfo().getWithOffset(4),
- St->isVolatile(),
- St->isNonTemporal(),
- MinAlign(St->getAlignment(), 4));
+ HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);
+
+ SDValue LoSt =
+ DAG.getStore(NewChain, StDL, LoLd, LoAddr, St->getPointerInfo(),
+ St->getAlignment(), St->getMemOperand()->getFlags());
+ SDValue HiSt = DAG.getStore(
+ NewChain, StDL, HiLd, HiAddr, St->getPointerInfo().getWithOffset(4),
+ MinAlign(St->getAlignment(), 4), St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
}
@@ -26728,7 +29401,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// to get past legalization. The execution dependencies fixup pass will
// choose the optimal machine instruction for the store if this really is
// an integer or v2f32 rather than an f64.
- if (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit() &&
+ if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
SDValue OldExtract = St->getOperand(1);
SDValue ExtOp0 = OldExtract.getOperand(0);
@@ -26738,8 +29411,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
BitCast, OldExtract.getOperand(1));
return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
+ St->getPointerInfo(), St->getAlignment(),
+ St->getMemOperand()->getFlags());
}
return SDValue();
@@ -26798,14 +29471,14 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
SDValue A, B;
SmallVector<int, 16> LMask(NumElts);
if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
- if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
+ if (!LHS.getOperand(0).isUndef())
A = LHS.getOperand(0);
- if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
+ if (!LHS.getOperand(1).isUndef())
B = LHS.getOperand(1);
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
std::copy(Mask.begin(), Mask.end(), LMask.begin());
} else {
- if (LHS.getOpcode() != ISD::UNDEF)
+ if (!LHS.isUndef())
A = LHS;
for (unsigned i = 0; i != NumElts; ++i)
LMask[i] = i;
@@ -26816,14 +29489,14 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
SDValue C, D;
SmallVector<int, 16> RMask(NumElts);
if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
- if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
+ if (!RHS.getOperand(0).isUndef())
C = RHS.getOperand(0);
- if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
+ if (!RHS.getOperand(1).isUndef())
D = RHS.getOperand(1);
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
std::copy(Mask.begin(), Mask.end(), RMask.begin());
} else {
- if (RHS.getOpcode() != ISD::UNDEF)
+ if (!RHS.isUndef())
C = RHS;
for (unsigned i = 0; i != NumElts; ++i)
RMask[i] = i;
@@ -26871,33 +29544,22 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
return true;
}
-/// Do target-specific dag combines on floating point adds.
-static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
- EVT VT = N->getValueType(0);
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
-
- // Try to synthesize horizontal adds from adds of shuffles.
- if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
- (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
- isHorizontalBinOp(LHS, RHS, true))
- return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
- return SDValue();
-}
-
-/// Do target-specific dag combines on floating point subs.
-static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+/// Do target-specific dag combines on floating-point adds/subs.
+static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
+ bool IsFadd = N->getOpcode() == ISD::FADD;
+ assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
- // Try to synthesize horizontal subs from subs of shuffles.
- if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
- (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
- isHorizontalBinOp(LHS, RHS, false))
- return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
+ // Try to synthesize horizontal add/sub from adds/subs of shuffles.
+ if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
+ (Subtarget.hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
+ isHorizontalBinOp(LHS, RHS, IsFadd)) {
+ auto NewOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
+ return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
+ }
return SDValue();
}
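// Shape of the shuffle+fadd pattern isHorizontalBinOp recognizes, shown for
// v4f32 in standalone C++ (illustrative): HADDPS forms pairwise sums, so
// fadd(shuffle(A,B,<0,2,4,6>), shuffle(A,B,<1,3,5,7>)) == haddps(A, B).
#include <array>

static std::array<float, 4> haddps(const std::array<float, 4> &A,
                                   const std::array<float, 4> &B) {
  return {A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3]};
}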
@@ -26916,13 +29578,11 @@ combineVectorTruncationWithPACKUS(SDNode *N, SelectionDAG &DAG,
// First, use mask to unset all bits that won't appear in the result.
assert((OutSVT == MVT::i8 || OutSVT == MVT::i16) &&
"OutSVT can only be either i8 or i16.");
- SDValue MaskVal =
- DAG.getConstant(OutSVT == MVT::i8 ? 0xFF : 0xFFFF, DL, InSVT);
- SDValue MaskVec = DAG.getNode(
- ISD::BUILD_VECTOR, DL, InVT,
- SmallVector<SDValue, 8>(InVT.getVectorNumElements(), MaskVal));
+ APInt Mask =
+ APInt::getLowBitsSet(InSVT.getSizeInBits(), OutSVT.getSizeInBits());
+ SDValue MaskVal = DAG.getConstant(Mask, DL, InVT);
for (auto &Reg : Regs)
- Reg = DAG.getNode(ISD::AND, DL, InVT, MaskVec, Reg);
+ Reg = DAG.getNode(ISD::AND, DL, InVT, MaskVal, Reg);
MVT UnpackedVT, PackedVT;
if (OutSVT == MVT::i8) {
@@ -26938,7 +29598,7 @@ combineVectorTruncationWithPACKUS(SDNode *N, SelectionDAG &DAG,
for (unsigned j = 1, e = InSVT.getSizeInBits() / OutSVT.getSizeInBits();
j < e; j *= 2, RegNum /= 2) {
for (unsigned i = 0; i < RegNum; i++)
- Regs[i] = DAG.getNode(ISD::BITCAST, DL, UnpackedVT, Regs[i]);
+ Regs[i] = DAG.getBitcast(UnpackedVT, Regs[i]);
for (unsigned i = 0; i < RegNum / 2; i++)
Regs[i] = DAG.getNode(X86ISD::PACKUS, DL, PackedVT, Regs[i * 2],
Regs[i * 2 + 1]);
@@ -26990,7 +29650,7 @@ combineVectorTruncationWithPACKSS(SDNode *N, SelectionDAG &DAG,
/// element that is extracted from a vector and then truncated, and it is
/// difficult to do this optimization based on them.
static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
EVT OutVT = N->getValueType(0);
if (!OutVT.isVector())
return SDValue();
@@ -27005,7 +29665,7 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
// TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
// SSE2, and we need to take care of it specially.
// AVX512 provides vpmovdb.
- if (!Subtarget->hasSSE2() || Subtarget->hasAVX2())
+ if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
return SDValue();
EVT OutSVT = OutVT.getVectorElementType();
@@ -27016,7 +29676,7 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
return SDValue();
  // SSSE3's pshufb results in fewer instructions in the cases below.
- if (Subtarget->hasSSSE3() && NumElems == 8 &&
+ if (Subtarget.hasSSSE3() && NumElems == 8 &&
((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
(InSVT == MVT::i32 && OutSVT == MVT::i16)))
return SDValue();
@@ -27026,20 +29686,17 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
// Split a long vector into vectors of legal type.
unsigned RegNum = InVT.getSizeInBits() / 128;
SmallVector<SDValue, 8> SubVec(RegNum);
- if (InSVT == MVT::i32) {
- for (unsigned i = 0; i < RegNum; i++)
- SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
- DAG.getIntPtrConstant(i * 4, DL));
- } else {
- for (unsigned i = 0; i < RegNum; i++)
- SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
- DAG.getIntPtrConstant(i * 2, DL));
- }
+ unsigned NumSubRegElts = 128 / InSVT.getSizeInBits();
+ EVT SubRegVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubRegElts);
- // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PAKCUS
+ for (unsigned i = 0; i < RegNum; i++)
+ SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubRegVT, In,
+ DAG.getIntPtrConstant(i * NumSubRegElts, DL));
+
+ // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
// for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
// truncate 2 x v4i32 to v8i16.
- if (Subtarget->hasSSE41() || OutSVT == MVT::i8)
+ if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
return combineVectorTruncationWithPACKUS(N, DAG, SubVec);
else if (InSVT == MVT::i32)
return combineVectorTruncationWithPACKSS(N, DAG, SubVec);
@@ -27047,20 +29704,30 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ EVT VT = N->getValueType(0);
+ SDValue Src = N->getOperand(0);
+ SDLoc DL(N);
+
// Try to detect AVG pattern first.
- SDValue Avg = detectAVGPattern(N->getOperand(0), N->getValueType(0), DAG,
- Subtarget, SDLoc(N));
- if (Avg.getNode())
+ if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
return Avg;
+  // Detect a truncation to i32 of a bitcast from x86mmx; the result is a
+  // direct mmx move and can be lowered to MMX_MOVD2W.
+ if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
+ SDValue BCSrc = Src.getOperand(0);
+ if (BCSrc.getValueType() == MVT::x86mmx)
+ return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
+ }
+
return combineVectorTruncation(N, DAG, Subtarget);
}
/// Do target-specific dag combines on floating point negations.
-static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
EVT SVT = VT.getScalarType();
SDValue Arg = N->getOperand(0);
@@ -27074,7 +29741,7 @@ static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG,
// use of a constant by performing (-0 - A*B) instead.
// FIXME: Check rounding control flags as well once it becomes available.
if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
- Arg->getFlags()->hasNoSignedZeros() && Subtarget->hasAnyFMA()) {
+ Arg->getFlags()->hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
Arg.getOperand(1), Zero);
@@ -27102,17 +29769,17 @@ static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG,
}
static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
- if (VT.is512BitVector() && !Subtarget->hasDQI()) {
+ if (VT.is512BitVector() && !Subtarget.hasDQI()) {
    // VXORPS, VORPS, VANDPS, VANDNPS are supported only under the DQ extension.
// These logic operations may be executed in the integer domain.
SDLoc dl(N);
MVT IntScalar = MVT::getIntegerVT(VT.getScalarSizeInBits());
MVT IntVT = MVT::getVectorVT(IntScalar, VT.getVectorNumElements());
- SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, IntVT, N->getOperand(0));
- SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, IntVT, N->getOperand(1));
+ SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
+ SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
unsigned IntOpcode = 0;
switch (N->getOpcode()) {
default: llvm_unreachable("Unexpected FP logic op");
@@ -27122,13 +29789,13 @@ static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
}
SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
- return DAG.getNode(ISD::BITCAST, dl, VT, IntOp);
+ return DAG.getBitcast(VT, IntOp);
}
return SDValue();
}
/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
-static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
// F[X]OR(0.0, x) -> x
@@ -27145,7 +29812,7 @@ static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG,
}
/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
-static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
// Only perform optimizations if UnsafeMath is used.
@@ -27165,9 +29832,9 @@ static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
N->getOperand(0), N->getOperand(1));
}
-static SDValue performFMinNumFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
- if (Subtarget->useSoftFloat())
+static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ if (Subtarget.useSoftFloat())
return SDValue();
// TODO: Check for global or instruction-level "nnan". In that case, we
@@ -27176,9 +29843,9 @@ static SDValue performFMinNumFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
// should be an optional swap and FMAX/FMIN.
EVT VT = N->getValueType(0);
- if (!((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
- (Subtarget->hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
- (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
+ if (!((Subtarget.hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
+ (Subtarget.hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
+ (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
return SDValue();
// This takes at least 3 instructions, so favor a library call when operating
@@ -27222,8 +29889,8 @@ static SDValue performFMinNumFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
}
/// Do target-specific dag combines on X86ISD::FAND nodes.
-static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
// FAND(0.0, x) -> 0.0
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
if (C->getValueAPF().isPosZero())
@@ -27238,8 +29905,8 @@ static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG,
}
/// Do target-specific dag combines on X86ISD::FANDN nodes
-static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
// FANDN(0.0, x) -> x
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
if (C->getValueAPF().isPosZero())
@@ -27253,9 +29920,8 @@ static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG,
return lowerX86FPLogicOp(N, DAG, Subtarget);
}
-static SDValue PerformBTCombine(SDNode *N,
- SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
// BT ignores high bits in the bit index operand.
SDValue Op1 = N->getOperand(1);
if (Op1.hasOneUse()) {
@@ -27272,21 +29938,19 @@ static SDValue PerformBTCombine(SDNode *N,
return SDValue();
}
-static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
- SDValue Op = N->getOperand(0);
- if (Op.getOpcode() == ISD::BITCAST)
- Op = Op.getOperand(0);
+static SDValue combineVZextMovl(SDNode *N, SelectionDAG &DAG) {
+ SDValue Op = peekThroughBitcasts(N->getOperand(0));
EVT VT = N->getValueType(0), OpVT = Op.getValueType();
if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
VT.getVectorElementType().getSizeInBits() ==
OpVT.getVectorElementType().getSizeInBits()) {
- return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
+ return DAG.getBitcast(VT, Op);
}
return SDValue();
}
-static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (!VT.isVector())
return SDValue();
@@ -27307,7 +29971,7 @@ static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
// EXTLOAD has a better solution on AVX2,
// it may be replaced with X86ISD::VSEXT node.
- if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
+ if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
if (!ISD::isNormalLoad(N00.getNode()))
return SDValue();
@@ -27325,7 +29989,7 @@ static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
/// to combine math ops, use an LEA, or use a complex addressing mode. This can
/// eliminate extend, add, and shift instructions.
static SDValue promoteSextBeforeAddNSW(SDNode *Sext, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
// TODO: This should be valid for other integer types.
EVT VT = Sext->getValueType(0);
if (VT != MVT::i64)
@@ -27397,14 +30061,106 @@ static SDValue getDivRem8(SDNode *N, SelectionDAG &DAG) {
return R.getValue(1);
}
-static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
+/// ZERO_EXTEND_VECTOR_INREG. This requires splitting (or concatenating with
+/// UNDEFs) the input into vectors of the same size as the target type, which
+/// then extend their lowest elements.
+static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
+ unsigned Opcode = N->getOpcode();
+ if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND)
+ return SDValue();
+ if (!DCI.isBeforeLegalizeOps())
+ return SDValue();
+ if (!Subtarget.hasSSE2())
+ return SDValue();
+
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SVT = VT.getScalarType();
EVT InVT = N0.getValueType();
EVT InSVT = InVT.getScalarType();
+
+ // Input type must be a vector and we must be extending legal integer types.
+ if (!VT.isVector())
+ return SDValue();
+ if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
+ return SDValue();
+ if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
+ return SDValue();
+
+ // On AVX2+ targets, if the input/output types are both legal then we will be
+ // able to use SIGN_EXTEND/ZERO_EXTEND directly.
+ if (Subtarget.hasInt256() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
+ DAG.getTargetLoweringInfo().isTypeLegal(InVT))
+ return SDValue();
+
+ SDLoc DL(N);
+
+ auto ExtendVecSize = [&DAG](const SDLoc &DL, SDValue N, unsigned Size) {
+ EVT InVT = N.getValueType();
+ EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
+ Size / InVT.getScalarSizeInBits());
+ SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(),
+ DAG.getUNDEF(InVT));
+ Opnds[0] = N;
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds);
+ };
+
+ // If target-size is less than 128-bits, extend to a type that would extend
+ // to 128 bits, extend that and extract the original target vector.
+ if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) {
+ unsigned Scale = 128 / VT.getSizeInBits();
+ EVT ExVT =
+ EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
+ SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
+ SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
+ DAG.getIntPtrConstant(0, DL));
+ }
+
+ // If target-size is 128-bits (or 256-bits on AVX2 target), then convert to
+ // ISD::*_EXTEND_VECTOR_INREG which ensures lowering to X86ISD::V*EXT.
+  // Also use this if we don't have SSE41, to allow the legalizer to do its job.
+ if (!Subtarget.hasSSE41() || VT.is128BitVector() ||
+ (VT.is256BitVector() && Subtarget.hasInt256())) {
+ SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
+ return Opcode == ISD::SIGN_EXTEND
+ ? DAG.getSignExtendVectorInReg(ExOp, DL, VT)
+ : DAG.getZeroExtendVectorInReg(ExOp, DL, VT);
+ }
+
+ // On pre-AVX2 targets, split into 128-bit nodes of
+ // ISD::*_EXTEND_VECTOR_INREG.
+ if (!Subtarget.hasInt256() && !(VT.getSizeInBits() % 128)) {
+ unsigned NumVecs = VT.getSizeInBits() / 128;
+ unsigned NumSubElts = 128 / SVT.getSizeInBits();
+ EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
+ EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
+
+ SmallVector<SDValue, 8> Opnds;
+ for (unsigned i = 0, Offset = 0; i != NumVecs; ++i, Offset += NumSubElts) {
+ SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
+ DAG.getIntPtrConstant(Offset, DL));
+ SrcVec = ExtendVecSize(DL, SrcVec, 128);
+ SrcVec = Opcode == ISD::SIGN_EXTEND
+ ? DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT)
+ : DAG.getZeroExtendVectorInReg(SrcVec, DL, SubVT);
+ Opnds.push_back(SrcVec);
+ }
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
+ }
+
+ return SDValue();
+}
+
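// Editor's sketch, not part of this patch: the widening arithmetic the
// combine above relies on, traced for a hypothetical v4i16 -> v4i64
// sign-extension on a pre-AVX2 (SSE4.1) target.
#include <cassert>

int main() {
  const unsigned VTBits = 4 * 64;  // v4i64 result: 256 bits
  const unsigned ScalarBits = 64, InScalarBits = 16;

  // Pre-AVX2, the 256-bit extend is split into 128-bit pieces, each an
  // ISD::SIGN_EXTEND_VECTOR_INREG over a v2i64 subvector.
  const unsigned NumVecs = VTBits / 128;         // 2 pieces
  const unsigned NumSubElts = 128 / ScalarBits;  // 2 x i64 per piece
  assert(NumVecs == 2 && NumSubElts == 2);

  // Each extracted source piece is 2 x i16 = 32 bits; ExtendVecSize pads it
  // to 128 bits by concatenating 128/32 = 4 operands, of which 3 are undef.
  const unsigned PieceBits = NumSubElts * InScalarBits;
  assert(128 / PieceBits == 4);
  return 0;
}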
+static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ EVT InVT = N0.getValueType();
SDLoc DL(N);
if (SDValue DivRem8 = getDivRem8(N, DAG))
@@ -27414,70 +30170,16 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
if (InVT == MVT::i1) {
SDValue Zero = DAG.getConstant(0, DL, VT);
SDValue AllOnes =
- DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
+ DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
return DAG.getNode(ISD::SELECT, DL, VT, N0, AllOnes, Zero);
}
return SDValue();
}
- if (VT.isVector() && Subtarget->hasSSE2()) {
- auto ExtendVecSize = [&DAG](SDLoc DL, SDValue N, unsigned Size) {
- EVT InVT = N.getValueType();
- EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
- Size / InVT.getScalarSizeInBits());
- SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(),
- DAG.getUNDEF(InVT));
- Opnds[0] = N;
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds);
- };
-
- // If target-size is less than 128-bits, extend to a type that would extend
- // to 128 bits, extend that and extract the original target vector.
- if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits()) &&
- (SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16) &&
- (InSVT == MVT::i32 || InSVT == MVT::i16 || InSVT == MVT::i8)) {
- unsigned Scale = 128 / VT.getSizeInBits();
- EVT ExVT =
- EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
- SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
- SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, ExVT, Ex);
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
- DAG.getIntPtrConstant(0, DL));
- }
-
- // If target-size is 128-bits, then convert to ISD::SIGN_EXTEND_VECTOR_INREG
- // which ensures lowering to X86ISD::VSEXT (pmovsx*).
- if (VT.getSizeInBits() == 128 &&
- (SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16) &&
- (InSVT == MVT::i32 || InSVT == MVT::i16 || InSVT == MVT::i8)) {
- SDValue ExOp = ExtendVecSize(DL, N0, 128);
- return DAG.getSignExtendVectorInReg(ExOp, DL, VT);
- }
-
- // On pre-AVX2 targets, split into 128-bit nodes of
- // ISD::SIGN_EXTEND_VECTOR_INREG.
- if (!Subtarget->hasInt256() && !(VT.getSizeInBits() % 128) &&
- (SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16) &&
- (InSVT == MVT::i32 || InSVT == MVT::i16 || InSVT == MVT::i8)) {
- unsigned NumVecs = VT.getSizeInBits() / 128;
- unsigned NumSubElts = 128 / SVT.getSizeInBits();
- EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
- EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
-
- SmallVector<SDValue, 8> Opnds;
- for (unsigned i = 0, Offset = 0; i != NumVecs;
- ++i, Offset += NumSubElts) {
- SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
- DAG.getIntPtrConstant(Offset, DL));
- SrcVec = ExtendVecSize(DL, SrcVec, 128);
- SrcVec = DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT);
- Opnds.push_back(SrcVec);
- }
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
- }
- }
+ if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
+ return V;
- if (Subtarget->hasAVX() && VT.is256BitVector())
+ if (Subtarget.hasAVX() && VT.is256BitVector())
if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
return R;
@@ -27487,8 +30189,8 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
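// Editor's sketch, not part of this patch: why sign-extending an i1 becomes
// a select between all-ones and zero, as combineSext does above.
#include <cassert>
#include <cstdint>

int main() {
  for (int b = 0; b <= 1; ++b) {
    int32_t Sext = -(int32_t)b; // sign_extend i1 -> i32
    int32_t Sel = b ? -1 : 0;   // select b, all-ones, zero
    assert(Sext == Sel);
  }
  return 0;
}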
-static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget* Subtarget) {
+static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
@@ -27497,7 +30199,7 @@ static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
EVT ScalarVT = VT.getScalarType();
- if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget->hasAnyFMA())
+ if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
return SDValue();
SDValue A = N->getOperand(0);
@@ -27526,9 +30228,9 @@ static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(Opcode, dl, VT, A, B, C);
}
-static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
// (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
// (and (i32 x86isd::setcc_carry), 1)
// This eliminates the zext. This transformation is necessary because
@@ -27563,6 +30265,9 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
}
}
+ if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
+ return V;
+
if (VT.is256BitVector())
if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
return R;
@@ -27573,10 +30278,10 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-// Optimize x == -y --> x+y == 0
-// x != -y --> x+y != 0
-static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget* Subtarget) {
+/// Optimize x == -y --> x+y == 0
+/// x != -y --> x+y != 0
+static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -27631,10 +30336,15 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
}
}
+ // For an SSE1-only target, lower to X86ISD::CMPP early to avoid scalarization
+ // via legalization because v4i32 is not a legal type.
+ if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32)
+ return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
+
return SDValue();
}
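// Editor's sketch, not part of this patch: the setcc identity used above.
// In two's complement, x == -y exactly when x + y wraps to zero, so a
// compare against a negation can be rewritten as a compare against zero.
#include <cassert>
#include <cstdint>

int main() {
  for (int32_t x = -4; x <= 4; ++x)
    for (int32_t y = -4; y <= 4; ++y) {
      // Unsigned arithmetic keeps the wrap-around well defined in C++.
      uint32_t Sum = (uint32_t)x + (uint32_t)y;
      assert((x == -y) == (Sum == 0));
      assert((x != -y) == (Sum != 0));
    }
  return 0;
}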
-static SDValue PerformGatherScatterCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
// Gather and Scatter instructions use k-registers for masks. The type of
// the masks is v*i1. So the mask will be truncated anyway.
@@ -27648,11 +30358,11 @@ static SDValue PerformGatherScatterCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-// Helper function of PerformSETCCCombine. It is to materialize "setb reg"
+// Helper function of combineX86SetCC. It materializes "setb reg"
// as "sbb reg,reg", since it can be extended without zext and produces
// an all-ones bit which is more useful than 0/1 in some cases.
-static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
- MVT VT) {
+static SDValue MaterializeSETB(const SDLoc &DL, SDValue EFLAGS,
+ SelectionDAG &DAG, MVT VT) {
if (VT == MVT::i8)
return DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
@@ -27667,9 +30377,9 @@ static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
}
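// Editor's sketch, not part of this patch: why "sbb reg,reg" materializes
// SETB. reg - reg - CF is 0 when CF=0 and all-ones when CF=1, so an AND
// with 1 recovers the plain 0/1 setb value, and the all-ones form is free.
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t CF = 0; CF <= 1; ++CF) {
    uint32_t Sbb = 0u - 0u - CF;            // reg - reg - carry
    assert(Sbb == (CF ? 0xFFFFFFFFu : 0u)); // 0 or all-ones
    assert((Sbb & 1u) == CF);               // AND 1 gives setb's 0/1
  }
  return 0;
}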
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
-static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
SDValue EFLAGS = N->getOperand(1);
@@ -27698,7 +30408,8 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
if (CC == X86::COND_B)
return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
- if (SDValue Flags = checkBoolTestSetCCCombine(EFLAGS, CC)) {
+ // Try to simplify the EFLAGS and condition code operands.
+ if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG)) {
SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
}
@@ -27706,28 +30417,28 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-// Optimize branch condition evaluation.
-//
-static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// Optimize branch condition evaluation.
+static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
- SDValue Chain = N->getOperand(0);
- SDValue Dest = N->getOperand(1);
SDValue EFLAGS = N->getOperand(3);
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
- if (SDValue Flags = checkBoolTestSetCCCombine(EFLAGS, CC)) {
+ // Try to simplify the EFLAGS and condition code operands.
+ // Make sure to not keep references to operands, as combineSetCCEFLAGS can
+ // RAUW them under us.
+ if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG)) {
SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
- return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
- Flags);
+ return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
+ N->getOperand(1), Cond, Flags);
}
return SDValue();
}
-static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
- SelectionDAG &DAG) {
+static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
+ SelectionDAG &DAG) {
// Take advantage of vector comparisons producing 0 or -1 in each lane to
// optimize away operation when it's from a constant.
//
@@ -27772,8 +30483,8 @@ static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
return SDValue();
}
-static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT InVT = Op0.getValueType();
@@ -27797,11 +30508,11 @@ static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
// First try to optimize away the conversion entirely when it's
// conditionally from a constant. Vectors only.
- if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
+ if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
return Res;
// Now move on to more general possibilities.
@@ -27822,18 +30533,18 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
// Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
// a 32-bit target where SSE doesn't support i64->FP operations.
- if (!Subtarget->useSoftFloat() && Op0.getOpcode() == ISD::LOAD) {
+ if (!Subtarget.useSoftFloat() && Op0.getOpcode() == ISD::LOAD) {
LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
EVT LdVT = Ld->getValueType(0);
- // This transformation is not supported if the result type is f16
- if (VT == MVT::f16)
+ // This transformation is not supported if the result type is f16 or f128.
+ if (VT == MVT::f16 || VT == MVT::f128)
return SDValue();
if (!Ld->isVolatile() && !VT.isVector() &&
ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
- !Subtarget->is64Bit() && LdVT == MVT::i64) {
- SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
+ !Subtarget.is64Bit() && LdVT == MVT::i64) {
+ SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
return FILDChain;
@@ -27843,8 +30554,8 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
}
// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
-static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
- X86TargetLowering::DAGCombinerInfo &DCI) {
+static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
+ X86TargetLowering::DAGCombinerInfo &DCI) {
// If the LHS and RHS of the ADC node are zero, then it can't overflow and
// the result is either zero or one (depending on the input carry bit).
// Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
@@ -27868,10 +30579,10 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-// fold (add Y, (sete X, 0)) -> adc 0, Y
-// (add Y, (setne X, 0)) -> sbb -1, Y
-// (sub (sete X, 0), Y) -> sbb 0, Y
-// (sub (setne X, 0), Y) -> adc -1, Y
+/// fold (add Y, (sete X, 0)) -> adc 0, Y
+/// (add Y, (setne X, 0)) -> sbb -1, Y
+/// (sub (sete X, 0), Y) -> sbb 0, Y
+/// (sub (setne X, 0), Y) -> adc -1, Y
static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
@@ -27909,24 +30620,163 @@ static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(0, DL, OtherVal.getValueType()), NewCmp);
}
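// Editor's sketch, not part of this patch: two of the scalar identities
// behind the adc/sbb folds above, with the carry flag modeled as
// CF = (X == 0), which is what the rewritten compare produces.
#include <cassert>

int main() {
  for (int X = -2; X <= 2; ++X)
    for (int Y = -2; Y <= 2; ++Y) {
      int CF = (X == 0);
      // add Y, (sete X, 0)  ->  adc 0, Y   (i.e. Y + 0 + CF)
      assert(Y + (X == 0 ? 1 : 0) == Y + 0 + CF);
      // add Y, (setne X, 0) ->  sbb -1, Y  (i.e. Y - (-1) - CF)
      assert(Y + (X != 0 ? 1 : 0) == Y - (-1) - CF);
    }
  return 0;
}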
-/// PerformADDCombine - Do target-specific dag combines on integer adds.
-static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue detectSADPattern(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+
+ if (!VT.isVector() || !VT.isSimple() ||
+ VT.getVectorElementType() != MVT::i32)
+ return SDValue();
+
+ unsigned RegSize = 128;
+ if (Subtarget.hasBWI())
+ RegSize = 512;
+ else if (Subtarget.hasAVX2())
+ RegSize = 256;
+
+ // We only handle v16i32 for SSE2 / v32i32 for AVX2 / v64i32 for AVX512.
+ if (VT.getSizeInBits() / 4 > RegSize)
+ return SDValue();
+
+ // Detect the following pattern:
+ //
+ // 1: %2 = zext <N x i8> %0 to <N x i32>
+ // 2: %3 = zext <N x i8> %1 to <N x i32>
+ // 3: %4 = sub nsw <N x i32> %2, %3
+ // 4: %5 = icmp sgt <N x i32> %4, [0 x N] or [-1 x N]
+ // 5: %6 = sub nsw <N x i32> zeroinitializer, %4
+ // 6: %7 = select <N x i1> %5, <N x i32> %4, <N x i32> %6
+ // 7: %8 = add nsw <N x i32> %7, %vec.phi
+ //
+ // The last instruction must be a reduction add. Instructions 3-6 form an
+ // ABSDIFF pattern.
+
+ // The two operands of reduction add are from PHI and a select-op as in line 7
+ // above.
+ SDValue SelectOp, Phi;
+ if (Op0.getOpcode() == ISD::VSELECT) {
+ SelectOp = Op0;
+ Phi = Op1;
+ } else if (Op1.getOpcode() == ISD::VSELECT) {
+ SelectOp = Op1;
+ Phi = Op0;
+ } else
+ return SDValue();
+
+ // Check the condition of the select instruction is greater-than.
+ SDValue SetCC = SelectOp->getOperand(0);
+ if (SetCC.getOpcode() != ISD::SETCC)
+ return SDValue();
+ ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
+ if (CC != ISD::SETGT)
+ return SDValue();
+
+ Op0 = SelectOp->getOperand(1);
+ Op1 = SelectOp->getOperand(2);
+
+ // The second operand of SelectOp, Op1, is the negation of its first operand
+ // Op0, which is implemented as 0 - Op0.
+ if (!(Op1.getOpcode() == ISD::SUB &&
+ ISD::isBuildVectorAllZeros(Op1.getOperand(0).getNode()) &&
+ Op1.getOperand(1) == Op0))
+ return SDValue();
+
+ // The first operand of SetCC is the first operand of SelectOp, which is the
+ // difference between two input vectors.
+ if (SetCC.getOperand(0) != Op0)
+ return SDValue();
+
+ // The second operand of the > comparison can be either -1 or 0.
+ if (!(ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()) ||
+ ISD::isBuildVectorAllOnes(SetCC.getOperand(1).getNode())))
+ return SDValue();
+
+ // The first operand of SelectOp is the difference between two input vectors.
+ if (Op0.getOpcode() != ISD::SUB)
+ return SDValue();
+
+ Op1 = Op0.getOperand(1);
+ Op0 = Op0.getOperand(0);
+
+ // Check if the operands of the diff are zero-extended from vectors of i8.
+ if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
+ Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
+ Op1.getOpcode() != ISD::ZERO_EXTEND ||
+ Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
+ return SDValue();
+
+ // SAD pattern detected. Now build a SAD instruction and an addition for
+ // reduction. Note that the number of elements of the result of SAD is less
+ // than the number of elements of its input. Therefore, we may only update
+ // part of the elements in the reduction vector.
+
+ // Legalize the type of the inputs of PSADBW.
+ EVT InVT = Op0.getOperand(0).getValueType();
+ if (InVT.getSizeInBits() <= 128)
+ RegSize = 128;
+ else if (InVT.getSizeInBits() <= 256)
+ RegSize = 256;
+
+ unsigned NumConcat = RegSize / InVT.getSizeInBits();
+ SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
+ Ops[0] = Op0.getOperand(0);
+ MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
+ Op0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
+ Ops[0] = Op1.getOperand(0);
+ Op1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
+
+ // The output of PSADBW is a vector of i64.
+ MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
+ SDValue Sad = DAG.getNode(X86ISD::PSADBW, DL, SadVT, Op0, Op1);
+
+ // We need to turn the vector of i64 into a vector of i32.
+ // If the reduction vector is at least as wide as the psadbw result, just
+ // bitcast. If it's narrower, truncate - the high i32 of each i64 is zero
+ // anyway.
+ MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
+ if (VT.getSizeInBits() >= ResVT.getSizeInBits())
+ Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
+ else
+ Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad);
+
+ if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
+ // Update part of the elements of the reduction vector. This is done by first
+ // extracting a sub-vector from it, updating this sub-vector, and inserting
+ // it back.
+ SDValue SubPhi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, Phi,
+ DAG.getIntPtrConstant(0, DL));
+ SDValue Res = DAG.getNode(ISD::ADD, DL, ResVT, Sad, SubPhi);
+ return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Phi, Res,
+ DAG.getIntPtrConstant(0, DL));
+ } else
+ return DAG.getNode(ISD::ADD, DL, VT, Sad, Phi);
+}
+
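// Editor's sketch, not part of this patch: a plausible source loop whose
// vectorized form yields the zext/sub/icmp/select/add pattern matched above
// (the function name and types here are illustrative only).
unsigned sad(const unsigned char *A, const unsigned char *B, int N) {
  unsigned Sum = 0;
  for (int I = 0; I < N; ++I) {
    int D = (int)A[I] - (int)B[I]; // lines 1-3: zext both sides, subtract
    Sum += D > -1 ? D : -D;        // lines 4-6: icmp sgt / negate / select
  }                                // line 7: the reduction add into Sum
  return Sum;
}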
+static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags;
+ if (Flags->hasVectorReduction()) {
+ if (SDValue Sad = detectSADPattern(N, DAG, Subtarget))
+ return Sad;
+ }
EVT VT = N->getValueType(0);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
// Try to synthesize horizontal adds from adds of shuffles.
- if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
- (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
+ if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
+ (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
isHorizontalBinOp(Op0, Op1, true))
return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
return OptimizeConditionalInDecrement(N, DAG);
}
-static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
@@ -27950,30 +30800,44 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
// Try to synthesize horizontal adds from adds of shuffles.
EVT VT = N->getValueType(0);
- if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
- (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
+ if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
+ (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
isHorizontalBinOp(Op0, Op1, true))
return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
return OptimizeConditionalInDecrement(N, DAG);
}
-/// performVZEXTCombine - Performs build vector combines
-static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineVZext(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
MVT VT = N->getSimpleValueType(0);
+ MVT SVT = VT.getVectorElementType();
SDValue Op = N->getOperand(0);
MVT OpVT = Op.getSimpleValueType();
MVT OpEltVT = OpVT.getVectorElementType();
unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
- // (vzext (bitcast (vzext (x)) -> (vzext x)
- SDValue V = Op;
- while (V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
+ // Perform any constant folding.
+ if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
+ SmallVector<SDValue, 4> Vals;
+ for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+ SDValue OpElt = Op.getOperand(i);
+ if (OpElt.getOpcode() == ISD::UNDEF) {
+ Vals.push_back(DAG.getUNDEF(SVT));
+ continue;
+ }
+ APInt Cst = cast<ConstantSDNode>(OpElt.getNode())->getAPIntValue();
+ assert(Cst.getBitWidth() == OpEltVT.getSizeInBits());
+ Cst = Cst.zextOrTrunc(SVT.getSizeInBits());
+ Vals.push_back(DAG.getConstant(Cst, DL, SVT));
+ }
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Vals);
+ }
+ // (vzext (bitcast (vzext x))) -> (vzext x)
+ SDValue V = peekThroughBitcasts(Op);
if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
MVT InnerVT = V.getSimpleValueType();
MVT InnerEltVT = InnerVT.getVectorElementType();
@@ -28022,61 +30886,111 @@ static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
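// Editor's sketch, not part of this patch: the per-element constant fold
// performed above, in scalar terms — each narrow constant is zero-extended
// (zextOrTrunc) to the wider lane type, never sign-filled.
#include <cassert>
#include <cstdint>

int main() {
  const uint8_t In[4] = {0xFF, 0x01, 0x80, 0x00}; // v4i8 build_vector
  uint32_t Out[4];
  for (int I = 0; I < 4; ++I)
    Out[I] = (uint32_t)In[I];                     // vzext lane: i8 -> i32
  assert(Out[0] == 0xFFu && Out[2] == 0x80u);     // high bits stay zero
  return 0;
}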
+/// Canonicalize (LSUB p, 1) -> (LADD p, -1).
+static SDValue combineLockSub(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ SDValue Chain = N->getOperand(0);
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ MVT VT = RHS.getSimpleValueType();
+ SDLoc DL(N);
+
+ auto *C = dyn_cast<ConstantSDNode>(RHS);
+ if (!C || C->getZExtValue() != 1)
+ return SDValue();
+
+ RHS = DAG.getConstant(-1, DL, VT);
+ MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
+ return DAG.getMemIntrinsicNode(X86ISD::LADD, DL,
+ DAG.getVTList(MVT::i32, MVT::Other),
+ {Chain, LHS, RHS}, VT, MMO);
+}
+
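// Editor's sketch, not part of this patch: the rewrite above is sound
// because subtracting 1 and adding -1 coincide in two's complement, so a
// lock-prefixed "sub $1" can be emitted as "add $-1" (matching LADD).
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Vals[] = {0u, 1u, 0x7FFFFFFFu, 0xFFFFFFFFu};
  for (uint32_t X : Vals)
    assert(X - 1u == X + (uint32_t)-1);
  return 0;
}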
+// TEST (AND a, b), (AND a, b) -> TEST a, b
+static SDValue combineTestM(SDNode *N, SelectionDAG &DAG) {
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+
+ if (Op0 != Op1 || Op1->getOpcode() != ISD::AND)
+ return SDValue();
+
+ EVT VT = N->getValueType(0);
+ SDLoc DL(N);
+
+ return DAG.getNode(X86ISD::TESTM, DL, VT,
+ Op0->getOperand(0), Op0->getOperand(1));
+}
+
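// Editor's sketch, not part of this patch: the mask-test fold above relies
// on AND being idempotent, so testing (a & b) against itself is the same as
// testing a against b.
#include <cassert>
#include <cstdint>

int main() {
  const uint8_t Vals[] = {0x00, 0x0F, 0xAA, 0xFF};
  for (uint8_t A : Vals)
    for (uint8_t B : Vals)
      assert(((A & B) & (A & B)) == (A & B));
  return 0;
}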
+static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ MVT VT = N->getSimpleValueType(0);
+ SDLoc DL(N);
+
+ if (N->getOperand(0) == N->getOperand(1)) {
+ if (N->getOpcode() == X86ISD::PCMPEQ)
+ return getOnesVector(VT, Subtarget, DAG, DL);
+ if (N->getOpcode() == X86ISD::PCMPGT)
+ return getZeroVector(VT, Subtarget, DAG, DL);
+ }
+
+ return SDValue();
+}
+
+
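// Editor's sketch, not part of this patch: the per-lane reasoning behind
// the self-compare folds above — x == x is always true (an all-ones lane)
// and x > x is always false (a zero lane).
#include <cassert>
#include <cstdint>

int main() {
  const int32_t Lanes[] = {-7, 0, 1, 0x7FFFFFFF};
  for (int32_t X : Lanes) {
    int32_t Eq = (X == X) ? -1 : 0; // PCMPEQ lane result
    int32_t Gt = (X > X) ? -1 : 0;  // PCMPGT lane result
    assert(Eq == -1 && Gt == 0);
  }
  return 0;
}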
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
- case ISD::EXTRACT_VECTOR_ELT:
- return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
+ case ISD::EXTRACT_VECTOR_ELT: return combineExtractVectorElt(N, DAG, DCI);
case ISD::VSELECT:
case ISD::SELECT:
- case X86ISD::SHRUNKBLEND:
- return PerformSELECTCombine(N, DAG, DCI, Subtarget);
- case ISD::BITCAST: return PerformBITCASTCombine(N, DAG, Subtarget);
- case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
- case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
- case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
- case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
- case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
+ case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
+ case ISD::BITCAST: return combineBitcast(N, DAG, Subtarget);
+ case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
+ case ISD::ADD: return combineAdd(N, DAG, Subtarget);
+ case ISD::SUB: return combineSub(N, DAG, Subtarget);
+ case X86ISD::ADC: return combineADC(N, DAG, DCI);
+ case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
case ISD::SHL:
case ISD::SRA:
- case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
- case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
- case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
- case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
- case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
- case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
- case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
- case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
- case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
- case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG, Subtarget);
- case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
- case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
- case ISD::FNEG: return PerformFNEGCombine(N, DAG, Subtarget);
- case ISD::TRUNCATE: return PerformTRUNCATECombine(N, DAG, Subtarget);
+ case ISD::SRL: return combineShift(N, DAG, DCI, Subtarget);
+ case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
+ case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
+ case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
+ case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
+ case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
+ case ISD::STORE: return combineStore(N, DAG, Subtarget);
+ case ISD::MSTORE: return combineMaskedStore(N, DAG, Subtarget);
+ case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, Subtarget);
+ case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
+ case ISD::FADD:
+ case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
+ case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
+ case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
case X86ISD::FXOR:
- case X86ISD::FOR: return PerformFORCombine(N, DAG, Subtarget);
+ case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
case X86ISD::FMIN:
- case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
+ case X86ISD::FMAX: return combineFMinFMax(N, DAG);
case ISD::FMINNUM:
- case ISD::FMAXNUM: return performFMinNumFMaxNumCombine(N, DAG,
- Subtarget);
- case X86ISD::FAND: return PerformFANDCombine(N, DAG, Subtarget);
- case X86ISD::FANDN: return PerformFANDNCombine(N, DAG, Subtarget);
- case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
- case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
+ case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
+ case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
+ case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
+ case X86ISD::BT: return combineBT(N, DAG, DCI);
+ case X86ISD::VZEXT_MOVL: return combineVZextMovl(N, DAG);
case ISD::ANY_EXTEND:
- case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
- case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
- case ISD::SIGN_EXTEND_INREG:
- return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
- case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
- case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
- case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
- case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
+ case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
+ case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
+ case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
+ case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
+ case X86ISD::SETCC: return combineX86SetCC(N, DAG, DCI, Subtarget);
+ case X86ISD::BRCOND: return combineBrCond(N, DAG, DCI, Subtarget);
+ case X86ISD::VZEXT: return combineVZext(N, DAG, DCI, Subtarget);
case X86ISD::SHUFP: // Handle all target specific shuffles
+ case X86ISD::INSERTPS:
case X86ISD::PALIGNR:
+ case X86ISD::VSHLDQ:
+ case X86ISD::VSRLDQ:
case X86ISD::BLENDI:
case X86ISD::UNPCKH:
case X86ISD::UNPCKL:
@@ -28086,23 +31000,36 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::PSHUFD:
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
+ case X86ISD::MOVSHDUP:
+ case X86ISD::MOVSLDUP:
+ case X86ISD::MOVDDUP:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
+ case X86ISD::VPPERM:
+ case X86ISD::VPERMI:
+ case X86ISD::VPERMV:
+ case X86ISD::VPERMV3:
+ case X86ISD::VPERMIL2:
case X86ISD::VPERMILPI:
+ case X86ISD::VPERMILPV:
case X86ISD::VPERM2X128:
- case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
- case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
+ case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
+ case ISD::FMA: return combineFMA(N, DAG, Subtarget);
case ISD::MGATHER:
- case ISD::MSCATTER: return PerformGatherScatterCombine(N, DAG);
+ case ISD::MSCATTER: return combineGatherScatter(N, DAG);
+ case X86ISD::LSUB: return combineLockSub(N, DAG, Subtarget);
+ case X86ISD::TESTM: return combineTestM(N, DAG);
+ case X86ISD::PCMPEQ:
+ case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
}
return SDValue();
}
-/// isTypeDesirableForOp - Return true if the target has native support for
-/// the specified value type and it is 'desirable' to use the type for the
-/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
-/// instruction encodings are longer and some i16 instructions are slow.
+/// Return true if the target has native support for the specified value type
+/// and it is 'desirable' to use the type for the given node type. e.g. On x86
+/// i16 is legal, but undesirable since i16 instruction encodings are longer and
+/// some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
if (!isTypeLegal(VT))
return false;
@@ -28140,9 +31067,9 @@ bool X86TargetLowering::hasCopyImplyingStackAdjustment(
[](const MachineInstr &RI) { return RI.isCopy(); });
}
-/// IsDesirableToPromoteOp - This method query the target whether it is
-/// beneficial for dag combiner to promote the specified node. If true, it
-/// should return the desired promotion type by reference.
+/// This method queries the target whether it is beneficial for the DAG combiner to
+/// promote the specified node. If true, it should return the desired promotion
+/// type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
EVT VT = Op.getValueType();
if (VT != MVT::i16)
@@ -28152,23 +31079,6 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
bool Commute = false;
switch (Op.getOpcode()) {
default: break;
- case ISD::LOAD: {
- LoadSDNode *LD = cast<LoadSDNode>(Op);
- // If the non-extending load has a single use and it's not live out, then it
- // might be folded.
- if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
- Op.hasOneUse()*/) {
- for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
- UE = Op.getNode()->use_end(); UI != UE; ++UI) {
- // The only case where we'd want to promote LOAD (rather then it being
- // promoted as an operand is when it's only use is liveout.
- if (UI->getOpcode() != ISD::CopyToReg)
- return false;
- }
- }
- Promote = true;
- break;
- }
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
@@ -28250,7 +31160,7 @@ static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
- std::string AsmStr = IA->getAsmString();
+ const std::string &AsmStr = IA->getAsmString();
IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty || Ty->getBitWidth() % 16 != 0)
@@ -28323,8 +31233,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
return false;
}
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
+/// Given a constraint letter, return the type of constraint for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {
if (Constraint.size() == 1) {
@@ -28403,13 +31312,13 @@ TargetLowering::ConstraintWeight
weight = CW_SpecificReg;
break;
case 'y':
- if (type->isX86_MMXTy() && Subtarget->hasMMX())
+ if (type->isX86_MMXTy() && Subtarget.hasMMX())
weight = CW_SpecificReg;
break;
case 'x':
case 'Y':
- if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
- ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
+ if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
+ ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasFp256()))
weight = CW_Register;
break;
case 'I':
@@ -28471,25 +31380,25 @@ TargetLowering::ConstraintWeight
return weight;
}
-/// LowerXConstraint - try to replace an X constraint, which matches anything,
-/// with another that has more specific requirements based on the type of the
-/// corresponding operand.
+/// Try to replace an X constraint, which matches anything, with another that
+/// has more specific requirements based on the type of the corresponding
+/// operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
// FP X constraints get lowered to SSE1/2 registers if available, otherwise
// 'f' like normal targets.
if (ConstraintVT.isFloatingPoint()) {
- if (Subtarget->hasSSE2())
+ if (Subtarget.hasSSE2())
return "Y";
- if (Subtarget->hasSSE1())
+ if (Subtarget.hasSSE1())
return "x";
}
return TargetLowering::LowerXConstraint(ConstraintVT);
}
-/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-/// vector. If it is invalid, don't add anything to Ops.
+/// Lower the specified operand into the Ops vector.
+/// If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
@@ -28532,7 +31441,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'L':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
- (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
+ (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
Op.getValueType());
break;
@@ -28605,7 +31514,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// In any sort of PIC mode addresses need to be computed at runtime by
// adding in a register or some sort of table lookup. These can't
// be used as immediates.
- if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
+ if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
return;
// If we are in non-pic codegen mode, we allow the address of a global (with
@@ -28639,8 +31548,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
const GlobalValue *GV = GA->getGlobal();
// If we require an extra load to get this address, as in PIC mode, we
// can't accept it.
- if (isGlobalStubReference(
- Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
+ if (isGlobalStubReference(Subtarget.classifyGlobalReference(GV)))
return;
Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
@@ -28656,6 +31564,65 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
+/// Check if \p RC is a general purpose register class.
+/// I.e., GR* or one of their variants.
+static bool isGRClass(const TargetRegisterClass &RC) {
+ switch (RC.getID()) {
+ case X86::GR8RegClassID:
+ case X86::GR8_ABCD_LRegClassID:
+ case X86::GR8_ABCD_HRegClassID:
+ case X86::GR8_NOREXRegClassID:
+ case X86::GR16RegClassID:
+ case X86::GR16_ABCDRegClassID:
+ case X86::GR16_NOREXRegClassID:
+ case X86::GR32RegClassID:
+ case X86::GR32_ABCDRegClassID:
+ case X86::GR32_TCRegClassID:
+ case X86::GR32_NOREXRegClassID:
+ case X86::GR32_NOAXRegClassID:
+ case X86::GR32_NOSPRegClassID:
+ case X86::GR32_NOREX_NOSPRegClassID:
+ case X86::GR32_ADRegClassID:
+ case X86::GR64RegClassID:
+ case X86::GR64_ABCDRegClassID:
+ case X86::GR64_TCRegClassID:
+ case X86::GR64_TCW64RegClassID:
+ case X86::GR64_NOREXRegClassID:
+ case X86::GR64_NOSPRegClassID:
+ case X86::GR64_NOREX_NOSPRegClassID:
+ case X86::LOW32_ADDR_ACCESSRegClassID:
+ case X86::LOW32_ADDR_ACCESS_RBPRegClassID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// Check if \p RC is a vector register class.
+/// I.e., FR* / VR* or one of their variants.
+static bool isFRClass(const TargetRegisterClass &RC) {
+ switch (RC.getID()) {
+ case X86::FR32RegClassID:
+ case X86::FR32XRegClassID:
+ case X86::FR64RegClassID:
+ case X86::FR64XRegClassID:
+ case X86::FR128RegClassID:
+ case X86::VR64RegClassID:
+ case X86::VR128RegClassID:
+ case X86::VR128LRegClassID:
+ case X86::VR128HRegClassID:
+ case X86::VR128XRegClassID:
+ case X86::VR256RegClassID:
+ case X86::VR256LRegClassID:
+ case X86::VR256HRegClassID:
+ case X86::VR256XRegClassID:
+ case X86::VR512RegClassID:
+ return true;
+ default:
+ return false;
+ }
+}
+
std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint,
@@ -28670,7 +31637,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
// RIP in the class. Do they matter any more here than they do
// in the normal allocation?
case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
if (VT == MVT::i32 || VT == MVT::f32)
return std::make_pair(0U, &X86::GR32RegClass);
if (VT == MVT::i16)
@@ -28698,7 +31665,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &X86::GR8RegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16RegClass);
- if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
+ if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
return std::make_pair(0U, &X86::GR32RegClass);
return std::make_pair(0U, &X86::GR64RegClass);
case 'R': // LEGACY_REGS
@@ -28706,7 +31673,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &X86::GR8_NOREXRegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16_NOREXRegClass);
- if (VT == MVT::i32 || !Subtarget->is64Bit())
+ if (VT == MVT::i32 || !Subtarget.is64Bit())
return std::make_pair(0U, &X86::GR32_NOREXRegClass);
return std::make_pair(0U, &X86::GR64_NOREXRegClass);
case 'f': // FP Stack registers.
@@ -28718,13 +31685,13 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &X86::RFP64RegClass);
return std::make_pair(0U, &X86::RFP80RegClass);
case 'y': // MMX_REGS if MMX allowed.
- if (!Subtarget->hasMMX()) break;
+ if (!Subtarget.hasMMX()) break;
return std::make_pair(0U, &X86::VR64RegClass);
case 'Y': // SSE_REGS if SSE2 allowed
- if (!Subtarget->hasSSE2()) break;
+ if (!Subtarget.hasSSE2()) break;
// FALL THROUGH.
case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
- if (!Subtarget->hasSSE1()) break;
+ if (!Subtarget.hasSSE1()) break;
switch (VT.SimpleTy) {
default: break;
@@ -28817,8 +31784,11 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
// return "eax". This should even work for things like getting 64bit integer
// registers when given an f64 type.
const TargetRegisterClass *Class = Res.second;
- if (Class == &X86::GR8RegClass || Class == &X86::GR16RegClass ||
- Class == &X86::GR32RegClass || Class == &X86::GR64RegClass) {
+ // The generic code will match the first register class that contains the
+ // given register. Thus, based on the ordering of the TableGen'erated file,
+ // the "plain" GR classes might not come first.
+ // Therefore, use a helper method.
+ if (isGRClass(*Class)) {
unsigned Size = VT.getSizeInBits();
if (Size == 1) Size = 8;
unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
@@ -28834,11 +31804,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Res.first = 0;
Res.second = nullptr;
}
- } else if (Class == &X86::FR32RegClass || Class == &X86::FR64RegClass ||
- Class == &X86::VR128RegClass || Class == &X86::VR256RegClass ||
- Class == &X86::FR32XRegClass || Class == &X86::FR64XRegClass ||
- Class == &X86::VR128XRegClass || Class == &X86::VR256XRegClass ||
- Class == &X86::VR512RegClass) {
+ } else if (isFRClass(*Class)) {
// Handle references to XMM physical registers that got mapped into the
// wrong class. This can happen with constraints like {xmm0} where the
// target independent register mapper will just pick the first match it can
@@ -28907,7 +31873,7 @@ bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
}
void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
- if (!Subtarget->is64Bit())
+ if (!Subtarget.is64Bit())
return;
// Update IsSplitCSR in X86MachineFunctionInfo.
@@ -28919,12 +31885,12 @@ void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
void X86TargetLowering::insertCopiesSplitCSR(
MachineBasicBlock *Entry,
const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
- const X86RegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
if (!IStart)
return;
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
MachineBasicBlock::iterator MBBI = Entry->begin();
for (const MCPhysReg *I = IStart; *I; ++I) {
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index b67958a9c4988..d826f1ec3e05b 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -75,7 +75,7 @@ namespace llvm {
///
CALL,
- /// This operation implements the lowering for readcyclecounter
+ /// This operation implements the lowering for readcyclecounter.
RDTSC_DAG,
/// X86 Read Time-Stamp Counter and Processor ID.
@@ -106,10 +106,6 @@ namespace llvm {
/// 0s or 1s. Generally DTRT for C/C++ with NaNs.
FSETCC,
- /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
- /// result in an integer GPR. Needs masking for scalar result.
- FGETSIGNx86,
-
/// X86 conditional moves. Operand 0 and operand 1 are the two values
/// to select from. Operand 2 is the condition code, and operand 3 is the
/// flag operand produced by a CMP or TEST instruction. It also writes a
@@ -191,9 +187,6 @@ namespace llvm {
/// Bitwise Logical AND NOT of Packed FP values.
ANDNP,
- /// Copy integer sign.
- PSIGN,
-
/// Blend where the selector is an immediate.
BLENDI,
@@ -214,30 +207,31 @@ namespace llvm {
FMIN_RND,
FSQRT_RND,
- // FP vector get exponent
+ // FP vector get exponent.
FGETEXP_RND,
- // Extract Normalized Mantissas
+ // Extract Normalized Mantissas.
VGETMANT,
- // FP Scale
+ // FP Scale.
SCALEF,
+ SCALEFS,
+
// Integer add/sub with unsigned saturation.
ADDUS,
SUBUS,
+
// Integer add/sub with signed saturation.
ADDS,
SUBS,
- // Unsigned Integer average
+
+ // Unsigned Integer average.
AVG,
- /// Integer horizontal add.
- HADD,
- /// Integer horizontal sub.
+ /// Integer horizontal add/sub.
+ HADD,
HSUB,
- /// Floating point horizontal add.
+ /// Floating point horizontal add/sub.
FHADD,
-
- /// Floating point horizontal sub.
FHSUB,
// Integer absolute value
@@ -256,7 +250,8 @@ namespace llvm {
/// Note that these typically require refinement
/// in order to obtain suitable precision.
FRSQRT, FRCP,
-
+ FRSQRTS, FRCPS,
+
// Thread Local Storage.
TLSADDR,
@@ -277,6 +272,9 @@ namespace llvm {
// SjLj exception handling longjmp.
EH_SJLJ_LONGJMP,
+ // SjLj exception handling dispatch.
+ EH_SJLJ_SETUP_DISPATCH,
+
/// Tail call return. See X86TargetLowering::LowerCall for
/// the list of operands.
TC_RETURN,
@@ -286,7 +284,6 @@ namespace llvm {
// Vector integer zero-extend.
VZEXT,
-
// Vector integer signed-extend.
VSEXT,
@@ -313,6 +310,11 @@ namespace llvm {
// Vector shift elements
VSHL, VSRL, VSRA,
+ // Vector variable shift right arithmetic.
+ // Unlike ISD::SRA, if the shift count is greater than the element size,
+ // the sign bit is used to fill the destination data element.
+ VSRAV,
+
// Vector shift elements by immediate
VSHLI, VSRLI, VSRAI,
@@ -327,6 +329,8 @@ namespace llvm {
// Vector integer comparisons, the result is in a mask vector.
PCMPEQM, PCMPGTM,
+ MULTISHIFT,
+
/// Vector comparison generating mask bits for fp and
/// integer signed and unsigned data types.
CMPM,
@@ -338,11 +342,13 @@ namespace llvm {
ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
- BEXTR, // Bit field extract
+ // Bit field extract.
+ BEXTR,
- UMUL, // LOW, HI, FLAGS = umul LHS, RHS
+ // LOW, HI, FLAGS = umul LHS, RHS.
+ UMUL,
- // 8-bit SMUL/UMUL - AX, FLAGS = smul8/umul8 AL, RHS
+ // 8-bit SMUL/UMUL - AX, FLAGS = smul8/umul8 AL, RHS.
SMUL8, UMUL8,
// 8-bit divrem that zero-extend the high result (AH).
@@ -352,6 +358,9 @@ namespace llvm {
// X86-specific multiply by immediate.
MUL_IMM,
+ // Vector sign bit extraction.
+ MOVMSK,
+
// Vector bitwise comparisons.
PTEST,
@@ -362,22 +371,23 @@ namespace llvm {
TESTM,
TESTNM,
- // OR/AND test for masks
+ // OR/AND test for masks.
KORTEST,
KTEST,
// Several flavors of instructions with vector shuffle behaviors.
+ // Saturated signed/unsigned packing.
PACKSS,
PACKUS,
- // Intra-lane alignr
+ // Intra-lane alignr.
PALIGNR,
- // AVX512 inter-lane alignr
+ // AVX512 inter-lane alignr.
VALIGN,
PSHUFD,
PSHUFHW,
PSHUFLW,
SHUFP,
- //Shuffle Packed Values at 128-bit granularity
+ // Shuffle Packed Values at 128-bit granularity.
SHUF128,
MOVDDUP,
MOVSHDUP,
@@ -393,61 +403,82 @@ namespace llvm {
UNPCKH,
VPERMILPV,
VPERMILPI,
+ VPERMI,
+ VPERM2X128,
+
+ // Variable Permute (VPERM).
+ // Res = VPERMV MaskV, V0
VPERMV,
+
+ // 3-op Variable Permute (VPERMT2).
+ // Res = VPERMV3 V0, MaskV, V1
VPERMV3,
+
+ // 3-op Variable Permute overwriting the index (VPERMI2).
+ // Res = VPERMIV3 V0, MaskV, V1
VPERMIV3,
- VPERMI,
- VPERM2X128,
- // Bitwise ternary logic
+
+ // Bitwise ternary logic.
VPTERNLOG,
- // Fix Up Special Packed Float32/64 values
+ // Fix Up Special Packed Float32/64 values.
VFIXUPIMM,
- // Range Restriction Calculation For Packed Pairs of Float32/64 values
+ VFIXUPIMMS,
+ // Range Restriction Calculation For Packed Pairs of Float32/64 values.
VRANGE,
- // Reduce - Perform Reduction Transformation on scalar\packed FP
+ // Reduce - Perform Reduction Transformation on scalar/packed FP.
VREDUCE,
- // RndScale - Round FP Values To Include A Given Number Of Fraction Bits
+ // RndScale - Round FP Values To Include A Given Number Of Fraction Bits.
VRNDSCALE,
- // VFPCLASS - Tests Types Of a FP Values for packed types.
- VFPCLASS,
- // VFPCLASSS - Tests Types Of a FP Values for scalar types.
- VFPCLASSS,
- // Broadcast scalar to vector
+ // Tests Types Of a FP Values for packed types.
+ VFPCLASS,
+ // Tests Types Of a FP Values for scalar types.
+ VFPCLASSS,
+
+ // Broadcast scalar to vector.
VBROADCAST,
- // Broadcast mask to vector
+ // Broadcast mask to vector.
VBROADCASTM,
- // Broadcast subvector to vector
+ // Broadcast subvector to vector.
SUBV_BROADCAST,
- // Insert/Extract vector element
+
+ // Insert/Extract vector element.
VINSERT,
VEXTRACT,
/// SSE4A Extraction and Insertion.
EXTRQI, INSERTQI,
- // XOP variable/immediate rotations
+ // XOP variable/immediate rotations.
VPROT, VPROTI,
- // XOP arithmetic/logical shifts
+ // XOP arithmetic/logical shifts.
VPSHA, VPSHL,
- // XOP signed/unsigned integer comparisons
+ // XOP signed/unsigned integer comparisons.
VPCOM, VPCOMU,
+ // XOP packed permute bytes.
+ VPPERM,
+ // XOP two source permutation.
+ VPERMIL2,
- // Vector multiply packed unsigned doubleword integers
+ // Vector multiply packed unsigned doubleword integers.
PMULUDQ,
- // Vector multiply packed signed doubleword integers
+ // Vector multiply packed signed doubleword integers.
PMULDQ,
- // Vector Multiply Packed UnsignedIntegers with Round and Scale
+ // Vector Multiply Packed Unsigned Integers with Round and Scale.
MULHRS,
- // Multiply and Add Packed Integers
+
+ // Multiply and Add Packed Integers.
VPMADDUBSW, VPMADDWD,
- // FMA nodes
+ VPMADD52L, VPMADD52H,
+
+ // FMA nodes.
FMADD,
FNMADD,
FMSUB,
FNMSUB,
FMADDSUB,
FMSUBADD,
- // FMA with rounding mode
+
+ // FMA with rounding mode.
FMADD_RND,
FNMADD_RND,
FMSUB_RND,
@@ -455,17 +486,20 @@ namespace llvm {
FMADDSUB_RND,
FMSUBADD_RND,
- // Compress and expand
+ // Compress and expand.
COMPRESS,
EXPAND,
- //Convert Unsigned/Integer to Scalar Floating-Point Value
- //with rounding mode
+ // Convert Signed/Unsigned Integer to Scalar Floating-Point Value
+ // with rounding mode.
SINT_TO_FP_RND,
UINT_TO_FP_RND,
// Vector float/double to signed/unsigned integer.
FP_TO_SINT_RND, FP_TO_UINT_RND,
+ // Scalar float/double to signed/unsigned integer.
+ SCALAR_FP_TO_SINT_RND, SCALAR_FP_TO_UINT_RND,
+
// Save xmm argument registers to the stack, according to %al. An operator
// is needed so that this can be expanded with control flow.
VASTART_SAVE_XMM_REGS,
@@ -478,11 +512,9 @@ namespace llvm {
// falls back to heap allocation if not.
SEG_ALLOCA,
- // Memory barrier
+ // Memory barriers.
MEMBARRIER,
MFENCE,
- SFENCE,
- LFENCE,
// Store FP status word into i16 register.
FNSTSW16r,
@@ -497,19 +529,26 @@ namespace llvm {
// indicate whether it is valid in CF.
RDSEED,
+ // SSE42 string comparisons.
PCMPISTRI,
PCMPESTRI,
// Test if in transactional execution.
XTEST,
- // ERI instructions
+ // ERI instructions.
RSQRT28, RCP28, EXP2,
// Compare and swap.
LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
LCMPXCHG8_DAG,
LCMPXCHG16_DAG,
+ LCMPXCHG8_SAVE_EBX_DAG,
+ LCMPXCHG16_SAVE_RBX_DAG,
+
+ /// LOCK-prefixed arithmetic read-modify-write instructions.
+ /// EFLAGS, OUTCHAIN = LADD(INCHAIN, PTR, RHS)
+ LADD, LSUB, LOR, LXOR, LAND,
// Load, scalar_to_vector, and zero extend.
VZEXT_LOAD,
@@ -551,10 +590,10 @@ namespace llvm {
VAARG_64
// WARNING: Do not add anything in the end unless you want the node to
- // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
- // thought as target memory ops!
+ // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
+ // opcodes will be treated as target memory ops!
};
- }
+ } // end namespace X86ISD
/// Define some predicates that are used for node matching.
namespace X86 {
@@ -606,13 +645,12 @@ namespace llvm {
bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
bool hasSymbolicDisplacement = true);
-
/// Determines whether the callee is required to pop its
/// own arguments. Callee pop is necessary to support tail calls.
bool isCalleePop(CallingConv::ID CallingConv,
- bool is64Bit, bool IsVarArg, bool TailCallOpt);
+ bool is64Bit, bool IsVarArg, bool GuaranteeTCO);
- }
+ } // end namespace X86
//===--------------------------------------------------------------------===//
// X86 Implementation of the TargetLowering interface
@@ -679,13 +717,20 @@ namespace llvm {
///
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ /// Places new result values for the node in Results (their number
+ /// and types must exactly match those of the original return values of
+ /// the node), or leaves Results empty, which indicates that the node is not
+ /// to be custom lowered after all.
+ void LowerOperationWrapper(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
+
/// Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const override;
-
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
/// Return true if the target has native support for
@@ -705,9 +750,8 @@ namespace llvm {
bool hasCopyImplyingStackAdjustment(MachineFunction *MF) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const override;
-
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
/// This method returns the name of a target specific DAG node.
const char *getTargetNodeName(unsigned Opcode) const override;
@@ -716,6 +760,12 @@ namespace llvm {
bool isCheapToSpeculateCtlz() const override;
+ bool hasBitPreservingFPLogic(EVT VT) const override {
+ return VT == MVT::f32 || VT == MVT::f64 || VT.isVector();
+ }
+
+ bool hasAndNotCompare(SDValue Y) const override;
+
/// Return the value type to use for ISD::SETCC.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const override;
@@ -914,16 +964,21 @@ namespace llvm {
unsigned
getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
+ bool needsFixedCatchObjects() const override;
+
/// This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const override;
- /// Return true if the target stores stack protector cookies at a fixed
- /// offset in some non-standard address space, and populates the address
- /// space and offset as appropriate.
- bool getStackCookieLocation(unsigned &AddressSpace,
- unsigned &Offset) const override;
+ /// If the target has a standard location for the stack protector cookie,
+ /// returns the address of that location. Otherwise, returns nullptr.
+ Value *getIRStackGuard(IRBuilder<> &IRB) const override;
+
+ bool useLoadStackGuardNode() const override;
+ void insertSSPDeclarations(Module &M) const override;
+ Value *getSDagStackGuard(const Module &M) const override;
+ Value *getSSPStackGuardCheck(const Module &M) const override;
/// Return true if the target stores SafeStack pointer at a fixed offset in
/// some non-standard address space, and populates the address space and
@@ -935,21 +990,24 @@ namespace llvm {
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
- bool useLoadStackGuardNode() const override;
/// \brief Customize the preferred legalization strategy for certain types.
LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;
+ bool supportSwiftError() const override {
+ return true;
+ }
+
protected:
std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo *TRI,
MVT VT) const override;
private:
- /// Keep a pointer to the X86Subtarget around so that we can
+ /// Keep a reference to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
- const X86Subtarget *Subtarget;
+ const X86Subtarget &Subtarget;
/// Select between SSE or x87 floating point ops.
/// When SSE is available, use it for f32 operations.
@@ -969,16 +1027,15 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- SDValue LowerMemArgument(SDValue Chain,
- CallingConv::ID CallConv,
+ SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &ArgInfo,
- SDLoc dl, SelectionDAG &DAG,
- const CCValAssign &VA, MachineFrameInfo *MFI,
- unsigned i) const;
+ const SDLoc &dl, SelectionDAG &DAG,
+ const CCValAssign &VA, MachineFrameInfo *MFI,
+ unsigned i) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const;
@@ -997,12 +1054,15 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
- SDValue Chain, bool IsTailCall, bool Is64Bit,
- int FPDiff, SDLoc dl) const;
+ SDValue Chain, bool IsTailCall,
+ bool Is64Bit, int FPDiff,
+ const SDLoc &dl) const;
unsigned GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG &DAG) const;
+ unsigned getAddressSpace() const;
+
std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
bool isSigned,
bool isReplace) const;
@@ -1017,7 +1077,7 @@ namespace llvm {
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
+ SDValue LowerGlobalAddress(const GlobalValue *GV, const SDLoc &dl,
int64_t Offset, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -1030,8 +1090,8 @@ namespace llvm {
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerToBT(SDValue And, ISD::CondCode CC,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDValue LowerToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
+ SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
@@ -1046,6 +1106,7 @@ namespace llvm {
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
@@ -1053,19 +1114,17 @@ namespace llvm {
SDValue LowerGC_TRANSITION_END(SDValue Op, SelectionDAG &DAG) const;
SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
- SDValue LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const override;
+ const SDLoc &dl, SelectionDAG &DAG) const override;
bool supportSplitCSR(MachineFunction *MF) const override {
return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
@@ -1080,8 +1139,8 @@ namespace llvm {
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
- EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
- ISD::NodeType ExtendKind) const override;
+ EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
+ ISD::NodeType ExtendKind) const override;
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
@@ -1101,57 +1160,60 @@ namespace llvm {
bool needsCmpXchgNb(Type *MemType) const;
+ void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
+ MachineBasicBlock *DispatchBB, int FI) const;
+
// Utility function to emit the low-level va_arg code for X86-64.
- MachineBasicBlock *EmitVAARG64WithCustomInserter(
- MachineInstr *MI,
- MachineBasicBlock *MBB) const;
+ MachineBasicBlock *
+ EmitVAARG64WithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
/// Utility function to emit the xmm reg save portion of va_start.
- MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
- MachineInstr *BInstr,
- MachineBasicBlock *BB) const;
+ MachineBasicBlock *
+ EmitVAStartSaveXMMRegsWithCustomInserter(MachineInstr &BInstr,
+ MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
+ MachineBasicBlock *EmitLoweredSelect(MachineInstr &I,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredAtomicFP(MachineInstr *I,
+ MachineBasicBlock *EmitLoweredAtomicFP(MachineInstr &I,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
- MachineBasicBlock *BB) const;
-
- MachineBasicBlock *EmitLoweredCatchRet(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredCatchPad(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredTLSCall(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
+ MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
+ MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *emitFMA3Instr(MachineInstr *MI,
+ MachineBasicBlock *emitFMA3Instr(MachineInstr &MI,
MachineBasicBlock *MBB) const;
+ MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl,
+ SDValue EmitTest(SDValue Op0, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG) const;
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, SDLoc dl,
+ SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG) const;
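[Note: the EmitTest/EmitCmp split mirrors the selected code: a compare against zero folds to TEST, anything else stays a CMP. A tiny C++ illustration, not compiler code; the emitted assembly shown in the comments is the typical result, not guaranteed.]

```cpp
// Comparing against zero is selected as "test reg,reg" (no immediate byte),
// while a nonzero comparison keeps "cmp reg,imm"; both set EFLAGS for the
// following setcc/jcc.
bool isZero(int X) { return X == 0; }  // typically: test edi,edi ; sete al
bool isFive(int X) { return X == 5; }  // typically: cmp  edi,5   ; sete al
```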
/// Convert a comparison if required by the subtarget.
@@ -1173,7 +1235,7 @@ namespace llvm {
namespace X86 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
- }
-}
+ } // end namespace X86
+} // end namespace llvm
-#endif // X86ISELLOWERING_H
+#endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 6f0199b015cdc..de4129f865417 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -30,6 +30,10 @@ class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
// Corresponding write-mask register class.
RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");
+ // The mask VT.
+ ValueType KVT = !cast<ValueType>(!if (!eq (NumElts, 1), "i1",
+ "v" # NumElts # "i1"));
+
// The GPR register class that can hold the write mask. Use GR8 for fewer
// than 8 elements. Use shift-right and equal to work around the lack of
// !lt in tablegen.
@@ -95,6 +99,12 @@ class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
"v" # NumElts # "f" # EltSize,
VTName)));
+ ValueType IntVT = !cast<ValueType>(
+ !if (!eq (!srl(EltSize,5),0),
+ VTName,
+ !if (!eq(TypeVariantName, "f"),
+ "v" # NumElts # "i" # EltSize,
+ VTName)));
// The string to specify embedded broadcast in assembly.
string BroadcastStr = "{1to" # NumElts # "}";
@@ -238,12 +248,12 @@ multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
string AttSrcAsm, string IntelSrcAsm,
dag RHS,
InstrItinClass itin = NoItinerary,
- bit IsCommutable = 0> :
+ bit IsCommutable = 0, SDNode Select = vselect> :
AVX512_maskable_common<O, F, _, Outs, Ins,
!con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
!con((ins _.KRCWM:$mask), Ins),
OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
- (vselect _.KRCWM:$mask, RHS, _.RC:$src0), vselect,
+ (Select _.KRCWM:$mask, RHS, _.RC:$src0), Select,
"$src0 = $dst", itin, IsCommutable>;
// This multiclass generates the unconditional/non-masking, the masking and
@@ -258,8 +268,8 @@ multiclass AVX512_maskable_scalar<bits<8> O, Format F, X86VectorVTInfo _,
!con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
!con((ins _.KRCWM:$mask), Ins),
OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
- (X86select _.KRCWM:$mask, RHS, _.RC:$src0), X86select,
- "$src0 = $dst", itin, IsCommutable>;
+ (X86selects _.KRCWM:$mask, RHS, _.RC:$src0),
+ X86selects, "$src0 = $dst", itin, IsCommutable>;
// Similar to AVX512_maskable but in this case one of the source operands
// ($src1) is already tied to $dst so we just use that for the preserved
@@ -301,7 +311,8 @@ multiclass AVX512_maskable_3src_scalar<bits<8> O, Format F, X86VectorVTInfo _,
!con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
!con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
- (X86select _.KRCWM:$mask, RHS, _.RC:$src1)>;
+ (X86selects _.KRCWM:$mask, RHS, _.RC:$src1),
+ X86selects>;
multiclass AVX512_maskable_in_asm<bits<8> O, Format F, X86VectorVTInfo _,
dag Outs, dag Ins,
@@ -363,119 +374,58 @@ multiclass AVX512_maskable_cmp_alt<bits<8> O, Format F, X86VectorVTInfo _,
AttSrcAsm, IntelSrcAsm, [],[]>;
// Bitcasts between 512-bit vector types. Return the original type since
-// no instruction is needed for the conversion
-let Predicates = [HasAVX512] in {
- def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
-
- def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
-
-// Bitcasts between 256-bit vector types. Return the original type since
-// no instruction is needed for the conversion
- def : Pat<(v4f64 (bitconvert (v8f32 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v4f64 (bitconvert (v8i32 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v4f64 (bitconvert (v4i64 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v4f64 (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v4f64 (bitconvert (v32i8 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v8i32 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v4i64 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v4f64 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v32i8 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v8f32 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v8i32 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v4f64 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v32i8 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v4f64 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v4i64 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v8f32 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v8i32 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v32i8 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v8f32 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v4i64 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v4f64 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))), (v16i16 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))), (v16i16 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))), (v16i16 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))), (v16i16 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>;
-}
-
-//
-// AVX-512: VPXOR instruction writes zero to its upper part, it's safe build zeros.
-//
-
+// no instruction is needed for the conversion.
+def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
+
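[Note: since every pattern above selects to the unchanged source register, a bitcast between equal-width vector types is purely a reinterpretation. The user-visible analogue is the intrinsic cast family; illustrative only.]

```cpp
#include <immintrin.h>

// _mm512_castpd_si512 compiles to no instruction at all: the same ZMM
// register is simply viewed with a different element type, exactly like the
// bitconvert patterns above.
static __m512i reinterpret(__m512d V) { return _mm512_castpd_si512(V); }
```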
+// Alias instruction that maps zero vector to pxor / xorp* for AVX-512.
+// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
+// swizzled by ExecutionDepsFix to pxor.
+// We set canFoldAsLoad because this can be converted to a constant-pool
+// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isPseudo = 1, Predicates = [HasAVX512] in {
+ isPseudo = 1, Predicates = [HasAVX512], SchedRW = [WriteZero] in {
def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
- [(set VR512:$dst, (v16f32 immAllZerosV))]>;
+ [(set VR512:$dst, (v16i32 immAllZerosV))]>;
+def AVX512_512_SETALLONES : I<0, Pseudo, (outs VR512:$dst), (ins), "",
+ [(set VR512:$dst, (v16i32 immAllOnesV))]>;
}
-let Predicates = [HasAVX512] in {
-def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
-def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
-def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+ isPseudo = 1, Predicates = [HasVLX], SchedRW = [WriteZero] in {
+def AVX512_128_SET0 : I<0, Pseudo, (outs VR128X:$dst), (ins), "",
+ [(set VR128X:$dst, (v4i32 immAllZerosV))]>;
+def AVX512_256_SET0 : I<0, Pseudo, (outs VR256X:$dst), (ins), "",
+ [(set VR256X:$dst, (v8i32 immAllZerosV))]>;
}
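[Note: a user-level illustration of the two pseudos; hedged, since the exact expansion depends on the subtarget.]

```cpp
#include <immintrin.h>

// AVX512_512_SET0 corresponds to _mm512_setzero_si512(), which compilers
// typically expand to a dependency-breaking vpxor/vxorps of a register with
// itself rather than a constant-pool load.
static __m512i zeros() { return _mm512_setzero_si512(); }

// AVX512_512_SETALLONES is the all-ones analogue; an all-ones splat is
// usually materialized with vpternlogd $0xff of a register against itself.
static __m512i ones() { return _mm512_set1_epi32(-1); }
```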
//===----------------------------------------------------------------------===//
@@ -483,7 +433,7 @@ def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
//
multiclass vinsert_for_size<int Opcode, X86VectorVTInfo From, X86VectorVTInfo To,
PatFrag vinsert_insert> {
- let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
+ let ExeDomain = To.ExeDomain in {
defm rr : AVX512_maskable<Opcode, MRMSrcReg, To, (outs To.RC:$dst),
(ins To.RC:$src1, From.RC:$src2, i32u8imm:$src3),
"vinsert" # From.EltTypeName # "x" # From.NumElts,
@@ -492,7 +442,6 @@ multiclass vinsert_for_size<int Opcode, X86VectorVTInfo From, X86VectorVTInfo To
(From.VT From.RC:$src2),
(iPTR imm))>, AVX512AIi8Base, EVEX_4V;
- let mayLoad = 1 in
defm rm : AVX512_maskable<Opcode, MRMSrcMem, To, (outs To.RC:$dst),
(ins To.RC:$src1, From.MemOp:$src2, i32u8imm:$src3),
"vinsert" # From.EltTypeName # "x" # From.NumElts,
@@ -615,19 +564,9 @@ def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
// AVX-512 VECTOR EXTRACT
//---
-multiclass vextract_for_size_first_position_lowering<X86VectorVTInfo From,
- X86VectorVTInfo To> {
- // A subvector extract from the first vector position is
- // a subregister copy that needs no instruction.
- def NAME # To.NumElts:
- Pat<(To.VT (extract_subvector (From.VT From.RC:$src),(iPTR 0))),
- (To.VT (EXTRACT_SUBREG (From.VT From.RC:$src), To.SubRegIdx))>;
-}
-
multiclass vextract_for_size<int Opcode,
X86VectorVTInfo From, X86VectorVTInfo To,
- PatFrag vextract_extract> :
- vextract_for_size_first_position_lowering<From, To> {
+ PatFrag vextract_extract> {
let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
// use AVX512_maskable_in_asm (AVX512_maskable can't be used due to
@@ -640,21 +579,22 @@ multiclass vextract_for_size<int Opcode,
[(set To.RC:$dst, (vextract_extract:$idx (From.VT From.RC:$src1),
(iPTR imm)))]>,
AVX512AIi8Base, EVEX;
- let mayStore = 1 in {
- def rm : AVX512AIi8<Opcode, MRMDestMem, (outs),
- (ins To.MemOp:$dst, From.RC:$src1, i32u8imm:$src2),
- "vextract" # To.EltTypeName # "x" # To.NumElts #
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, EVEX;
-
- def rmk : AVX512AIi8<Opcode, MRMDestMem, (outs),
- (ins To.MemOp:$dst, To.KRCWM:$mask,
- From.RC:$src1, i32u8imm:$src2),
- "vextract" # To.EltTypeName # "x" # To.NumElts #
- "\t{$src2, $src1, $dst {${mask}}|"
- "$dst {${mask}}, $src1, $src2}",
- []>, EVEX_K, EVEX;
- }//mayStore = 1
+ def mr : AVX512AIi8<Opcode, MRMDestMem, (outs),
+ (ins To.MemOp:$dst, From.RC:$src1, i32u8imm:$idx),
+ "vextract" # To.EltTypeName # "x" # To.NumElts #
+ "\t{$idx, $src1, $dst|$dst, $src1, $idx}",
+ [(store (To.VT (vextract_extract:$idx
+ (From.VT From.RC:$src1), (iPTR imm))),
+ addr:$dst)]>, EVEX;
+
+ let mayStore = 1, hasSideEffects = 0 in
+ def mrk : AVX512AIi8<Opcode, MRMDestMem, (outs),
+ (ins To.MemOp:$dst, To.KRCWM:$mask,
+ From.RC:$src1, i32u8imm:$idx),
+ "vextract" # To.EltTypeName # "x" # To.NumElts #
+ "\t{$idx, $src1, $dst {${mask}}|"
+ "$dst {${mask}}, $src1, $idx}",
+ []>, EVEX_K, EVEX;
}
// Intrinsic call with masking.
@@ -688,14 +628,17 @@ multiclass vextract_for_size<int Opcode,
// Codegen pattern for the alternative types
multiclass vextract_for_size_lowering<string InstrStr, X86VectorVTInfo From,
X86VectorVTInfo To, PatFrag vextract_extract,
- SDNodeXForm EXTRACT_get_vextract_imm, list<Predicate> p> :
- vextract_for_size_first_position_lowering<From, To> {
-
- let Predicates = p in
+ SDNodeXForm EXTRACT_get_vextract_imm, list<Predicate> p> {
+ let Predicates = p in {
def : Pat<(vextract_extract:$ext (From.VT From.RC:$src1), (iPTR imm)),
(To.VT (!cast<Instruction>(InstrStr#"rr")
From.RC:$src1,
(EXTRACT_get_vextract_imm To.RC:$ext)))>;
+ def : Pat<(store (To.VT (vextract_extract:$ext (From.VT From.RC:$src1),
+ (iPTR imm))), addr:$dst),
+ (!cast<Instruction>(InstrStr#"mr") addr:$dst, From.RC:$src1,
+ (EXTRACT_get_vextract_imm To.RC:$ext))>;
+ }
}
multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
@@ -756,6 +699,12 @@ defm : vextract_for_size_lowering<"VEXTRACTF32x4Z256", v4f64x_info, v2f64x_info,
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v4i64x_info, v2i64x_info,
vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;
+// Codegen patterns with the alternative types: extract VEC128 from VEC256
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v16i16x_info, v8i16x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX]>;
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v32i8x_info, v16i8x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX]>;
+
// Codegen patterns with the alternative types: extract VEC128 from VEC512
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v32i16_info, v8i16x_info,
vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
@@ -767,46 +716,76 @@ defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v32i16_info, v16i16x_info,
defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v64i8_info, v32i8x_info,
vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
+// A 128-bit subvector extract from the first 512-bit vector position
+// is a subregister copy that needs no instruction.
+def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
+ (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
+def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
+ (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
+def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
+ (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
+def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
+ (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
+def : Pat<(v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
+ (v8i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_xmm))>;
+def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
+ (v16i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_xmm))>;
+
+// A 256-bit subvector extract from the first 512-bit vector position
+// is a subregister copy that needs no instruction.
+def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
+ (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
+def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
+ (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
+def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
+ (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
+def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
+ (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
+def : Pat<(v16i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
+ (v16i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_ymm))>;
+def : Pat<(v32i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
+ (v32i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_ymm))>;
+
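[Note: from the intrinsics side, the corresponding "free" operations are the narrowing casts; illustrative only.]

```cpp
#include <immintrin.h>

// Reading the low lanes of a ZMM register needs no instruction: the XMM and
// YMM views are subregisters of the same physical register, which is what
// the EXTRACT_SUBREG patterns above encode.
static __m128i low128(__m512i V) { return _mm512_castsi512_si128(V); }
static __m256i low256(__m512i V) { return _mm512_castsi512_si256(V); }
```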
+let AddedComplexity = 25 in { // to give priority over vinsertf128rm
// A 128-bit subvector insert to the first 512-bit vector position
// is a subregister copy that needs no instruction.
-def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
- (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
- (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
- sub_ymm)>;
-def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
- (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
- (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
- sub_ymm)>;
-def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
- (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
- (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
- sub_ymm)>;
-def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
- (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
- (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
- sub_ymm)>;
-
-def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
+def : Pat<(v8i64 (insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v8f64 (insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v16i32 (insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v16f32 (insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v32i16 (insert_subvector undef, (v8i16 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v64i8 (insert_subvector undef, (v16i8 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+
+// A 256-bit subvector insert to the first 512-bit vector position
+// is a subregister copy that needs no instruction.
+def : Pat<(v8i64 (insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
+def : Pat<(v8f64 (insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
+def : Pat<(v16i32 (insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
+def : Pat<(v16f32 (insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v16i16 VR256X:$src), (iPTR 0)),
+def : Pat<(v32i16 (insert_subvector undef, (v16i16 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v32i8 VR256X:$src), (iPTR 0)),
+def : Pat<(v64i8 (insert_subvector undef, (v32i8 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
+}
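[Note: the widening direction is equally cheap when the upper lanes are undef, as the INSERT_SUBREG patterns above show; the matching intrinsic, illustrative only.]

```cpp
#include <immintrin.h>

// Widening into undefined upper lanes is at most a subregister copy: the
// 128-bit value already occupies the low part of some ZMM register.
static __m512i widen(__m128i V) { return _mm512_castsi128_si512(V); }
```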
// vextractps - extract 32 bits from XMM
-def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
+def VEXTRACTPSZrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
(ins VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
EVEX;
-def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
+def VEXTRACTPSZmr : AVX512AIi8<0x17, MRMDestMem, (outs),
(ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
@@ -815,90 +794,107 @@ def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
//---
+// Broadcast with a scalar argument.
+multiclass avx512_broadcast_scalar<bits<8> opc, string OpcodeStr,
+ X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
+
+ let isCodeGenOnly = 1 in {
+ def r_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.FRC:$src), OpcodeStr#"\t{$src, $dst|$dst, $src}",
+ [(set DestInfo.RC:$dst, (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)))]>,
+ Requires<[HasAVX512]>, T8PD, EVEX;
+
+ let Constraints = "$src0 = $dst" in
+ def rk_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
+ (ins DestInfo.RC:$src0, DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
+ OpcodeStr#"\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
+ [(set DestInfo.RC:$dst,
+ (vselect DestInfo.KRCWM:$mask,
+ (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
+ DestInfo.RC:$src0))]>,
+ Requires<[HasAVX512]>, T8PD, EVEX, EVEX_K;
+
+ def rkz_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
+ (ins DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
+ OpcodeStr#"\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
+ [(set DestInfo.RC:$dst,
+ (vselect DestInfo.KRCWM:$mask,
+ (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
+ DestInfo.ImmAllZerosV))]>,
+ Requires<[HasAVX512]>, T8PD, EVEX, EVEX_KZ;
+ } // let isCodeGenOnly = 1 in
+}
multiclass avx512_broadcast_rm<bits<8> opc, string OpcodeStr,
X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
-
+ let ExeDomain = DestInfo.ExeDomain in {
defm r : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
(ins SrcInfo.RC:$src), OpcodeStr, "$src", "$src",
(DestInfo.VT (X86VBroadcast (SrcInfo.VT SrcInfo.RC:$src)))>,
T8PD, EVEX;
- let mayLoad = 1 in
- defm m : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
- (ins SrcInfo.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
- (DestInfo.VT (X86VBroadcast
- (SrcInfo.ScalarLdFrag addr:$src)))>,
- T8PD, EVEX, EVEX_CD8<SrcInfo.EltSize, CD8VT1>;
-}
+ defm m : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
+ (DestInfo.VT (X86VBroadcast
+ (SrcInfo.ScalarLdFrag addr:$src)))>,
+ T8PD, EVEX, EVEX_CD8<SrcInfo.EltSize, CD8VT1>;
+ }
-multiclass avx512_fp_broadcast_vl<bits<8> opc, string OpcodeStr,
+ def : Pat<(DestInfo.VT (X86VBroadcast
+ (SrcInfo.VT (scalar_to_vector
+ (SrcInfo.ScalarLdFrag addr:$src))))),
+ (!cast<Instruction>(NAME#DestInfo.ZSuffix#m) addr:$src)>;
+ let AddedComplexity = 20 in
+ def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
+ (X86VBroadcast
+ (SrcInfo.VT (scalar_to_vector
+ (SrcInfo.ScalarLdFrag addr:$src)))),
+ DestInfo.RC:$src0)),
+ (!cast<Instruction>(NAME#DestInfo.ZSuffix#mk)
+ DestInfo.RC:$src0, DestInfo.KRCWM:$mask, addr:$src)>;
+ let AddedComplexity = 30 in
+ def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
+ (X86VBroadcast
+ (SrcInfo.VT (scalar_to_vector
+ (SrcInfo.ScalarLdFrag addr:$src)))),
+ DestInfo.ImmAllZerosV)),
+ (!cast<Instruction>(NAME#DestInfo.ZSuffix#mkz)
+ DestInfo.KRCWM:$mask, addr:$src)>;
+}
+
+multiclass avx512_fp_broadcast_sd<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo _> {
- defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
- EVEX_V512;
+ let Predicates = [HasAVX512] in
+ defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
+ EVEX_V512;
let Predicates = [HasVLX] in {
defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
EVEX_V256;
}
}
-let ExeDomain = SSEPackedSingle in {
- defm VBROADCASTSS : avx512_fp_broadcast_vl<0x18, "vbroadcastss",
- avx512vl_f32_info>;
- let Predicates = [HasVLX] in {
- defm VBROADCASTSSZ128 : avx512_broadcast_rm<0x18, "vbroadcastss",
- v4f32x_info, v4f32x_info>, EVEX_V128;
- }
-}
-
-let ExeDomain = SSEPackedDouble in {
- defm VBROADCASTSD : avx512_fp_broadcast_vl<0x19, "vbroadcastsd",
- avx512vl_f64_info>, VEX_W;
-}
-
-// avx512_broadcast_pat introduces patterns for broadcast with a scalar argument.
-// Later, we can canonize broadcast instructions before ISel phase and
-// eliminate additional patterns on ISel.
-// SrcRC_v and SrcRC_s are RegisterClasses for vector and scalar
-// representations of source
-multiclass avx512_broadcast_pat<string InstName, SDNode OpNode,
- X86VectorVTInfo _, RegisterClass SrcRC_v,
- RegisterClass SrcRC_s> {
- def : Pat<(_.VT (OpNode (_.EltVT SrcRC_s:$src))),
- (!cast<Instruction>(InstName##"r")
- (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
-
- let AddedComplexity = 30 in {
- def : Pat<(_.VT (vselect _.KRCWM:$mask,
- (OpNode (_.EltVT SrcRC_s:$src)), _.RC:$src0)),
- (!cast<Instruction>(InstName##"rk") _.RC:$src0, _.KRCWM:$mask,
- (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
+multiclass avx512_fp_broadcast_ss<bits<8> opc, string OpcodeStr,
+ AVX512VLVectorVTInfo _> {
+ let Predicates = [HasAVX512] in
+ defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
+ EVEX_V512;
- def : Pat<(_.VT(vselect _.KRCWM:$mask,
- (OpNode (_.EltVT SrcRC_s:$src)), _.ImmAllZerosV)),
- (!cast<Instruction>(InstName##"rkz") _.KRCWM:$mask,
- (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
+ let Predicates = [HasVLX] in {
+ defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
+ EVEX_V256;
+ defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _.info128, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info128, _.info128>,
+ EVEX_V128;
}
}
-
-defm : avx512_broadcast_pat<"VBROADCASTSSZ", X86VBroadcast, v16f32_info,
- VR128X, FR32X>;
-defm : avx512_broadcast_pat<"VBROADCASTSDZ", X86VBroadcast, v8f64_info,
- VR128X, FR64X>;
-
-let Predicates = [HasVLX] in {
- defm : avx512_broadcast_pat<"VBROADCASTSSZ256", X86VBroadcast,
- v8f32x_info, VR128X, FR32X>;
- defm : avx512_broadcast_pat<"VBROADCASTSSZ128", X86VBroadcast,
- v4f32x_info, VR128X, FR32X>;
- defm : avx512_broadcast_pat<"VBROADCASTSDZ256", X86VBroadcast,
- v4f64x_info, VR128X, FR64X>;
-}
-
-def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
- (VBROADCASTSSZm addr:$src)>;
-def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
- (VBROADCASTSDZm addr:$src)>;
+defm VBROADCASTSS : avx512_fp_broadcast_ss<0x18, "vbroadcastss",
+ avx512vl_f32_info>;
+defm VBROADCASTSD : avx512_fp_broadcast_sd<0x19, "vbroadcastsd",
+ avx512vl_f64_info>, VEX_W;
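[Note: a quick sketch of the unmasked and merge-masked variants these multiclasses generate, expressed through the corresponding intrinsics; helper names are illustrative, and the in-tree tests are authoritative.]

```cpp
#include <immintrin.h>

// Unmasked: vbroadcastss zmm, xmm splats the low scalar to all 16 lanes.
static __m512 splat(float X) { return _mm512_set1_ps(X); }

// Merge-masked (the rk_s form above): lanes with a clear mask bit keep Old.
static __m512 splatMerge(__m512 Old, __mmask16 M, float X) {
  return _mm512_mask_broadcastss_ps(Old, M, _mm_set_ss(X));
}
```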
def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
(VBROADCASTSSZm addr:$src)>;
@@ -907,9 +903,10 @@ def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
multiclass avx512_int_broadcast_reg<bits<8> opc, X86VectorVTInfo _,
RegisterClass SrcRC> {
- defm r : AVX512_maskable_in_asm<opc, MRMSrcReg, _, (outs _.RC:$dst),
- (ins SrcRC:$src), "vpbroadcast"##_.Suffix,
- "$src", "$src", []>, T8PD, EVEX;
+ defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins SrcRC:$src),
+ "vpbroadcast"##_.Suffix, "$src", "$src",
+ (_.VT (X86VBroadcast SrcRC:$src))>, T8PD, EVEX;
}
multiclass avx512_int_broadcast_reg_vl<bits<8> opc, AVX512VLVectorVTInfo _,
@@ -922,10 +919,18 @@ multiclass avx512_int_broadcast_reg_vl<bits<8> opc, AVX512VLVectorVTInfo _,
}
}
-defm VPBROADCASTBr : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info, GR32,
+let isCodeGenOnly = 1 in {
+defm VPBROADCASTBr : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info, GR8,
HasBWI>;
-defm VPBROADCASTWr : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info, GR32,
+defm VPBROADCASTWr : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info, GR16,
HasBWI>;
+}
+let isAsmParserOnly = 1 in {
+ defm VPBROADCASTBr_Alt : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info,
+ GR32, HasBWI>;
+ defm VPBROADCASTWr_Alt : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info,
+ GR32, HasBWI>;
+}
defm VPBROADCASTDr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i32_info, GR32,
HasAVX512>;
defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info, GR64,
@@ -933,27 +938,9 @@ defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info, GR64,
def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
(VPBROADCASTDrZrkz VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
-
def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
(VPBROADCASTQrZrkz VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
-def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
- (VPBROADCASTDrZr GR32:$src)>;
-def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
- (VPBROADCASTQrZr GR64:$src)>;
-
-def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
- (VPBROADCASTDrZr GR32:$src)>;
-def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
- (VPBROADCASTQrZr GR64:$src)>;
-
-def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
- (v16i32 immAllZerosV), (i16 GR16:$mask))),
- (VPBROADCASTDrZrkz (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
-def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
- (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
- (VPBROADCASTQrZrkz (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
-
// Provide aliases for broadcast from the same register class that
// automatically does the extract.
multiclass avx512_int_broadcast_rm_lowering<X86VectorVTInfo DestInfo,
@@ -992,12 +979,11 @@ defm VPBROADCASTQ : avx512_int_broadcast_rm_vl<0x59, "vpbroadcastq",
multiclass avx512_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
X86VectorVTInfo _Dst, X86VectorVTInfo _Src> {
- let mayLoad = 1 in
- defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
- (_Dst.VT (X86SubVBroadcast
- (_Src.VT (bitconvert (_Src.LdFrag addr:$src)))))>,
- AVX5128IBase, EVEX;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
+ (_Dst.VT (X86SubVBroadcast
+ (_Src.VT (bitconvert (_Src.LdFrag addr:$src)))))>,
+ AVX5128IBase, EVEX;
}
defm VBROADCASTI32X4 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
@@ -1044,45 +1030,29 @@ defm VBROADCASTF32X8 : avx512_subvec_broadcast_rm<0x1b, "vbroadcastf32x8",
EVEX_V512, EVEX_CD8<32, CD8VT8>;
}
-multiclass avx512_broadcast_32x2<bits<8> opc, string OpcodeStr,
- X86VectorVTInfo _Dst, X86VectorVTInfo _Src,
- SDNode OpNode = X86SubVBroadcast> {
-
- defm r : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src), OpcodeStr, "$src", "$src",
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src)))>,
- T8PD, EVEX;
- let mayLoad = 1 in
- defm m : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
- (_Dst.VT (OpNode
- (_Src.VT (scalar_to_vector(loadi64 addr:$src)))))>,
- T8PD, EVEX, EVEX_CD8<_Src.EltSize, CD8VT2>;
-}
-
multiclass avx512_common_broadcast_32x2<bits<8> opc, string OpcodeStr,
- AVX512VLVectorVTInfo _> {
+ AVX512VLVectorVTInfo _Dst, AVX512VLVectorVTInfo _Src> {
let Predicates = [HasDQI] in
- defm Z : avx512_broadcast_32x2<opc, OpcodeStr, _.info512, _.info128>,
+ defm Z : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info512, _Src.info128>,
EVEX_V512;
let Predicates = [HasDQI, HasVLX] in
- defm Z256 : avx512_broadcast_32x2<opc, OpcodeStr, _.info256, _.info128>,
+ defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info256, _Src.info128>,
EVEX_V256;
}
multiclass avx512_common_broadcast_i32x2<bits<8> opc, string OpcodeStr,
- AVX512VLVectorVTInfo _> :
- avx512_common_broadcast_32x2<opc, OpcodeStr, _> {
+ AVX512VLVectorVTInfo _Dst, AVX512VLVectorVTInfo _Src> :
+ avx512_common_broadcast_32x2<opc, OpcodeStr, _Dst, _Src> {
let Predicates = [HasDQI, HasVLX] in
- defm Z128 : avx512_broadcast_32x2<opc, OpcodeStr, _.info128, _.info128,
- X86SubV32x2Broadcast>, EVEX_V128;
+ defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info128, _Src.info128>,
+ EVEX_V128;
}
defm VPBROADCASTI32X2 : avx512_common_broadcast_i32x2<0x59, "vbroadcasti32x2",
- avx512vl_i32_info>;
+ avx512vl_i32_info, avx512vl_i64_info>;
defm VPBROADCASTF32X2 : avx512_common_broadcast_32x2<0x19, "vbroadcastf32x2",
- avx512vl_f32_info>;
+ avx512vl_f32_info, avx512vl_f64_info>;
def : Pat<(v16f32 (X86VBroadcast (v16f32 VR512:$src))),
(VBROADCASTSSZr (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
@@ -1094,14 +1064,6 @@ def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
def : Pat<(v8f64 (X86VBroadcast (v4f64 VR256X:$src))),
(VBROADCASTSDZr (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm))>;
-// Provide fallback in case the load node that is used in the patterns above
-// is used by additional users, which prevents the pattern selection.
-def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
- (VBROADCASTSSZr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
-def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
- (VBROADCASTSDZr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
-
-
//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST MASK TO VECTOR REGISTER
//---
@@ -1112,7 +1074,7 @@ multiclass avx512_mask_broadcastm<bits<8> opc, string OpcodeStr,
[(set _.RC:$dst, (_.VT (X86VBroadcastm KRC:$src)))]>, EVEX;
}
-multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
+multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo VTInfo, RegisterClass KRC> {
let Predicates = [HasCDI] in
defm Z : avx512_mask_broadcastm<opc, OpcodeStr, VTInfo.info512, KRC>, EVEX_V512;
@@ -1138,7 +1100,6 @@ let Constraints = "$src1 = $dst" in {
(_.VT (X86VPermi2X IdxVT.RC:$src1, _.RC:$src2, _.RC:$src3))>, EVEX_4V,
AVX5128IBase;
- let mayLoad = 1 in
defm rm: AVX512_maskable_3src_cast<opc, MRMSrcMem, _, IdxVT, (outs _.RC:$dst),
(ins _.RC:$src2, _.MemOp:$src3),
OpcodeStr, "$src3, $src2", "$src2, $src3",
@@ -1149,7 +1110,7 @@ let Constraints = "$src1 = $dst" in {
}
multiclass avx512_perm_i_mb<bits<8> opc, string OpcodeStr,
X86VectorVTInfo _, X86VectorVTInfo IdxVT> {
- let mayLoad = 1, Constraints = "$src1 = $dst" in
+ let Constraints = "$src1 = $dst" in
defm rmb: AVX512_maskable_3src_cast<opc, MRMSrcMem, _, IdxVT, (outs _.RC:$dst),
(ins _.RC:$src2, _.ScalarMemOp:$src3),
OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
@@ -1178,13 +1139,14 @@ multiclass avx512_perm_i_sizes<bits<8> opc, string OpcodeStr,
}
}
-multiclass avx512_perm_i_sizes_w<bits<8> opc, string OpcodeStr,
+multiclass avx512_perm_i_sizes_bw<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo VTInfo,
- AVX512VLVectorVTInfo Idx> {
- let Predicates = [HasBWI] in
+ AVX512VLVectorVTInfo Idx,
+ Predicate Prd> {
+ let Predicates = [Prd] in
defm NAME: avx512_perm_i<opc, OpcodeStr, VTInfo.info512,
Idx.info512>, EVEX_V512;
- let Predicates = [HasBWI, HasVLX] in {
+ let Predicates = [Prd, HasVLX] in {
defm NAME#128: avx512_perm_i<opc, OpcodeStr, VTInfo.info128,
Idx.info128>, EVEX_V128;
defm NAME#256: avx512_perm_i<opc, OpcodeStr, VTInfo.info256,
@@ -1196,8 +1158,12 @@ defm VPERMI2D : avx512_perm_i_sizes<0x76, "vpermi2d",
avx512vl_i32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
defm VPERMI2Q : avx512_perm_i_sizes<0x76, "vpermi2q",
avx512vl_i64_info, avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPERMI2W : avx512_perm_i_sizes_w<0x75, "vpermi2w",
- avx512vl_i16_info, avx512vl_i16_info>, VEX_W, EVEX_CD8<16, CD8VF>;
+defm VPERMI2W : avx512_perm_i_sizes_bw<0x75, "vpermi2w",
+ avx512vl_i16_info, avx512vl_i16_info, HasBWI>,
+ VEX_W, EVEX_CD8<16, CD8VF>;
+defm VPERMI2B : avx512_perm_i_sizes_bw<0x75, "vpermi2b",
+ avx512vl_i8_info, avx512vl_i8_info, HasVBMI>,
+ EVEX_CD8<8, CD8VF>;
defm VPERMI2PS : avx512_perm_i_sizes<0x77, "vpermi2ps",
avx512vl_f32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
defm VPERMI2PD : avx512_perm_i_sizes<0x77, "vpermi2pd",
@@ -1213,7 +1179,6 @@ let Constraints = "$src1 = $dst" in {
(_.VT (X86VPermt2 _.RC:$src1, IdxVT.RC:$src2, _.RC:$src3))>, EVEX_4V,
AVX5128IBase;
- let mayLoad = 1 in
defm rm: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins IdxVT.RC:$src2, _.MemOp:$src3),
OpcodeStr, "$src3, $src2", "$src2, $src3",
@@ -1224,7 +1189,7 @@ let Constraints = "$src1 = $dst" in {
}
multiclass avx512_perm_t_mb<bits<8> opc, string OpcodeStr,
X86VectorVTInfo _, X86VectorVTInfo IdxVT> {
- let mayLoad = 1, Constraints = "$src1 = $dst" in
+ let Constraints = "$src1 = $dst" in
defm rmb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins IdxVT.RC:$src2, _.ScalarMemOp:$src3),
OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
@@ -1253,13 +1218,14 @@ multiclass avx512_perm_t_sizes<bits<8> opc, string OpcodeStr,
}
}
-multiclass avx512_perm_t_sizes_w<bits<8> opc, string OpcodeStr,
+multiclass avx512_perm_t_sizes_bw<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo VTInfo,
- AVX512VLVectorVTInfo Idx> {
- let Predicates = [HasBWI] in
+ AVX512VLVectorVTInfo Idx,
+ Predicate Prd> {
+ let Predicates = [Prd] in
defm NAME: avx512_perm_t<opc, OpcodeStr, VTInfo.info512,
Idx.info512>, EVEX_V512;
- let Predicates = [HasBWI, HasVLX] in {
+ let Predicates = [Prd, HasVLX] in {
defm NAME#128: avx512_perm_t<opc, OpcodeStr, VTInfo.info128,
Idx.info128>, EVEX_V128;
defm NAME#256: avx512_perm_t<opc, OpcodeStr, VTInfo.info256,
@@ -1271,8 +1237,12 @@ defm VPERMT2D : avx512_perm_t_sizes<0x7E, "vpermt2d",
avx512vl_i32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
defm VPERMT2Q : avx512_perm_t_sizes<0x7E, "vpermt2q",
avx512vl_i64_info, avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPERMT2W : avx512_perm_t_sizes_w<0x7D, "vpermt2w",
- avx512vl_i16_info, avx512vl_i16_info>, VEX_W, EVEX_CD8<16, CD8VF>;
+defm VPERMT2W : avx512_perm_t_sizes_bw<0x7D, "vpermt2w",
+ avx512vl_i16_info, avx512vl_i16_info, HasBWI>,
+ VEX_W, EVEX_CD8<16, CD8VF>;
+defm VPERMT2B : avx512_perm_t_sizes_bw<0x7D, "vpermt2b",
+ avx512vl_i8_info, avx512vl_i8_info, HasVBMI>,
+ EVEX_CD8<8, CD8VF>;
defm VPERMT2PS : avx512_perm_t_sizes<0x7F, "vpermt2ps",
avx512vl_f32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
defm VPERMT2PD : avx512_perm_t_sizes<0x7F, "vpermt2pd",
@@ -1283,6 +1253,7 @@ defm VPERMT2PD : avx512_perm_t_sizes<0x7F, "vpermt2pd",
//
multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
let ExeDomain = _.ExeDomain in {
+ let hasSideEffects = 0 in
def rr : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2),
!strconcat(OpcodeStr,
@@ -1292,14 +1263,16 @@ multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
(ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
- [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
- (_.VT _.RC:$src2)))]>, EVEX_4V, EVEX_K;
+ [(set _.RC:$dst, (vselect _.KRCWM:$mask,
+ (_.VT _.RC:$src2),
+ (_.VT _.RC:$src1)))]>, EVEX_4V, EVEX_K;
+ let hasSideEffects = 0 in
def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
[]>, EVEX_4V, EVEX_KZ;
- let mayLoad = 1 in {
+ let mayLoad = 1, hasSideEffects = 0 in
def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr,
@@ -1309,16 +1282,17 @@ multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
(ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
- [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
- (_.VT (bitconvert (_.LdFrag addr:$src2)))))]>,
+ [(set _.RC:$dst, (vselect _.KRCWM:$mask,
+ (_.VT (bitconvert (_.LdFrag addr:$src2))),
+ (_.VT _.RC:$src1)))]>,
EVEX_4V, EVEX_K, EVEX_CD8<_.EltSize, CD8VF>;
+ let mayLoad = 1, hasSideEffects = 0 in
def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
[]>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>;
}
- }
}
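[Note: observe the operand order the rewritten vselect patterns encode: a set mask bit picks the second source. The blend intrinsic exposes the same convention; illustrative only.]

```cpp
#include <immintrin.h>

// For each 32-bit lane: dst[i] = M[i] ? B[i] : A[i].  A set mask bit selects
// from the second source, matching the vselect patterns above.
static __m512i blend(__mmask16 M, __m512i A, __m512i B) {
  return _mm512_mask_blend_epi32(M, A, B);
}
```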
multiclass avx512_blendmask_rmb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
@@ -1327,10 +1301,12 @@ multiclass avx512_blendmask_rmb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _
!strconcat(OpcodeStr,
"\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
"$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
- [(set _.RC:$dst,(X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
- (X86VBroadcast (_.ScalarLdFrag addr:$src2))))]>,
+ [(set _.RC:$dst,(vselect _.KRCWM:$mask,
+ (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
+ (_.VT _.RC:$src1)))]>,
EVEX_4V, EVEX_K, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
+ let mayLoad = 1, hasSideEffects = 0 in
def rmb : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2),
!strconcat(OpcodeStr,
@@ -1373,7 +1349,7 @@ defm VPBLENDMB : blendmask_bw <0x66, "vpblendmb", avx512vl_i8_info>;
defm VPBLENDMW : blendmask_bw <0x66, "vpblendmw", avx512vl_i16_info>, VEX_W;
-let Predicates = [HasAVX512] in {
+let Predicates = [HasAVX512, NoVLX] in {
def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
(v8f32 VR256X:$src2))),
(EXTRACT_SUBREG
@@ -1404,15 +1380,14 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeRnd>
(OpNode (_.VT _.RC:$src1),
(_.VT _.RC:$src2),
imm:$cc)>, EVEX_4V;
- let mayLoad = 1 in
- defm rm_Int : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
- (outs _.KRC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
- "vcmp${cc}"#_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
- imm:$cc)>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
+ defm rm_Int : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
+ (outs _.KRC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
+ "vcmp${cc}"#_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
+ imm:$cc)>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
defm rrb_Int : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
(outs _.KRC:$dst),
@@ -1432,7 +1407,7 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeRnd>
"$cc, $src2, $src1", "$src1, $src2, $cc">, EVEX_4V;
defm rmi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
(outs _.KRC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
"vcmp"#_.Suffix,
"$cc, $src2, $src1", "$src1, $src2, $cc">,
EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
@@ -1454,16 +1429,15 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeRnd>
_.FRC:$src2,
imm:$cc))],
IIC_SSE_ALU_F32S_RR>, EVEX_4V;
- let mayLoad = 1 in
- def rm : AVX512Ii8<0xC2, MRMSrcMem,
- (outs _.KRC:$dst),
- (ins _.FRC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
- !strconcat("vcmp${cc}", _.Suffix,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set _.KRC:$dst, (OpNode _.FRC:$src1,
- (_.ScalarLdFrag addr:$src2),
- imm:$cc))],
- IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
+ def rm : AVX512Ii8<0xC2, MRMSrcMem,
+ (outs _.KRC:$dst),
+ (ins _.FRC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
+ !strconcat("vcmp${cc}", _.Suffix,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set _.KRC:$dst, (OpNode _.FRC:$src1,
+ (_.ScalarLdFrag addr:$src2),
+ imm:$cc))],
+ IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
}
}
@@ -1481,7 +1455,6 @@ multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V;
- let mayLoad = 1 in
def rm : AVX512BI<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -1495,7 +1468,6 @@ multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
[(set _.KRC:$dst, (and _.KRCWM:$mask,
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
- let mayLoad = 1 in
def rmk : AVX512BI<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
@@ -1510,7 +1482,6 @@ multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> :
avx512_icmp_packed<opc, OpcodeStr, OpNode, _> {
- let mayLoad = 1 in {
def rmb : AVX512BI<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2),
!strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr, ", $src1, $dst",
@@ -1529,7 +1500,6 @@ multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
(X86VBroadcast
(_.ScalarLdFrag addr:$src2)))))],
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
- }
}
multiclass avx512_icmp_packed_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -1612,7 +1582,6 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
imm:$cc))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V;
- let mayLoad = 1 in
def rmi : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVX512ICC:$cc),
!strconcat("vpcmp${cc}", Suffix,
@@ -1631,7 +1600,6 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
imm:$cc)))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
- let mayLoad = 1 in
def rmik : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
AVX512ICC:$cc),
@@ -1774,25 +1742,23 @@ multiclass avx512_vcmp_common<X86VectorVTInfo _> {
(_.VT _.RC:$src2),
imm:$cc)>;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
- (outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
- "vcmp${cc}"#_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (X86cmpm (_.VT _.RC:$src1),
- (_.VT (bitconvert (_.LdFrag addr:$src2))),
- imm:$cc)>;
-
- defm rmbi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
- (outs _.KRC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
- "vcmp${cc}"#_.Suffix,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (X86cmpm (_.VT _.RC:$src1),
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
- imm:$cc)>,EVEX_B;
- }
+ defm rmi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
+ (outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
+ "vcmp${cc}"#_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (X86cmpm (_.VT _.RC:$src1),
+ (_.VT (bitconvert (_.LdFrag addr:$src2))),
+ imm:$cc)>;
+
+ defm rmbi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
+ (outs _.KRC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
+ "vcmp${cc}"#_.Suffix,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (X86cmpm (_.VT _.RC:$src1),
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
+ imm:$cc)>,EVEX_B;
// Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1, hasSideEffects = 0 in {
defm rri_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
@@ -1888,10 +1854,10 @@ multiclass avx512_scalar_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.KRCWM:$mask, _.RC:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix#
"\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- [(set _.KRC:$dst,(or _.KRCWM:$mask,
+ [(set _.KRC:$dst,(or _.KRCWM:$mask,
(OpNode (_.VT _.RC:$src1),
(i32 imm:$src2))))], NoItinerary>, EVEX_K;
- let mayLoad = 1, AddedComplexity = 20 in {
+ let AddedComplexity = 20 in {
def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
(ins _.MemOp:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix##
@@ -1903,7 +1869,7 @@ multiclass avx512_scalar_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix##
"\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- [(set _.KRC:$dst,(or _.KRCWM:$mask,
+ [(set _.KRC:$dst,(or _.KRCWM:$mask,
(OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
(i32 imm:$src2))))], NoItinerary>, EVEX_K;
}
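A hedged sketch of how the scalar vfpclass form surfaces at the source level, assuming an AVX512DQ-capable toolchain (-mavx512dq); the immediate value and intrinsic use are illustrative only:

#include <immintrin.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  /* vfpclassss: classify the scalar element and return a k-register bit.
     The immediate selects the categories to test; 0x18 is intended to
     request the two infinity classes here. */
  __m128 x = _mm_set_ss(INFINITY);
  __mmask8 k = _mm_fpclass_ss_mask(x, 0x18);
  printf("is infinity: %u\n", (unsigned)(k & 1));
  return 0;
}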
@@ -1924,51 +1890,49 @@ multiclass avx512_vector_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.KRCWM:$mask, _.RC:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix#
"\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- [(set _.KRC:$dst,(or _.KRCWM:$mask,
+ [(set _.KRC:$dst,(or _.KRCWM:$mask,
(OpNode (_.VT _.RC:$src1),
(i32 imm:$src2))))], NoItinerary>, EVEX_K;
- let mayLoad = 1 in {
- def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
- (ins _.MemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix##mem#
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set _.KRC:$dst,(OpNode
- (_.VT (bitconvert (_.LdFrag addr:$src1))),
- (i32 imm:$src2)))], NoItinerary>;
- def rmk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
- (ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix##mem#
- "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- [(set _.KRC:$dst, (or _.KRCWM:$mask, (OpNode
- (_.VT (bitconvert (_.LdFrag addr:$src1))),
- (i32 imm:$src2))))], NoItinerary>, EVEX_K;
- def rmb : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
- (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
- _.BroadcastStr##", $dst|$dst, ${src1}"
- ##_.BroadcastStr##", $src2}",
- [(set _.KRC:$dst,(OpNode
- (_.VT (X86VBroadcast
- (_.ScalarLdFrag addr:$src1))),
- (i32 imm:$src2)))], NoItinerary>,EVEX_B;
- def rmbk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
- (ins _.KRCWM:$mask, _.ScalarMemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
- _.BroadcastStr##", $dst {${mask}}|$dst {${mask}}, ${src1}"##
- _.BroadcastStr##", $src2}",
- [(set _.KRC:$dst,(or _.KRCWM:$mask, (OpNode
- (_.VT (X86VBroadcast
- (_.ScalarLdFrag addr:$src1))),
- (i32 imm:$src2))))], NoItinerary>,
- EVEX_B, EVEX_K;
- }
+ def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
+ (ins _.MemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix##mem#
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set _.KRC:$dst,(OpNode
+ (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (i32 imm:$src2)))], NoItinerary>;
+ def rmk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
+ (ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix##mem#
+ "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
+ [(set _.KRC:$dst, (or _.KRCWM:$mask, (OpNode
+ (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (i32 imm:$src2))))], NoItinerary>, EVEX_K;
+ def rmb : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
+ (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
+ _.BroadcastStr##", $dst|$dst, ${src1}"
+ ##_.BroadcastStr##", $src2}",
+ [(set _.KRC:$dst,(OpNode
+ (_.VT (X86VBroadcast
+ (_.ScalarLdFrag addr:$src1))),
+ (i32 imm:$src2)))], NoItinerary>,EVEX_B;
+ def rmbk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
+ (ins _.KRCWM:$mask, _.ScalarMemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
+ _.BroadcastStr##", $dst {${mask}}|$dst {${mask}}, ${src1}"##
+ _.BroadcastStr##", $src2}",
+ [(set _.KRC:$dst,(or _.KRCWM:$mask, (OpNode
+ (_.VT (X86VBroadcast
+ (_.ScalarLdFrag addr:$src1))),
+ (i32 imm:$src2))))], NoItinerary>,
+ EVEX_B, EVEX_K;
}
multiclass avx512_vector_fpclass_all<string OpcodeStr,
- AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd,
+ AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd,
string broadcast>{
let Predicates = [prd] in {
- defm Z : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info512, "{z}",
+ defm Z : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info512, "{z}",
broadcast>, EVEX_V512;
}
let Predicates = [prd, HasVLX] in {
@@ -1981,9 +1945,9 @@ multiclass avx512_vector_fpclass_all<string OpcodeStr,
multiclass avx512_fp_fpclass_all<string OpcodeStr, bits<8> opcVec,
bits<8> opcScalar, SDNode VecOpNode, SDNode ScalarOpNode, Predicate prd>{
- defm PS : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f32_info, opcVec,
+ defm PS : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f32_info, opcVec,
VecOpNode, prd, "{l}">, EVEX_CD8<32, CD8VF>;
- defm PD : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f64_info, opcVec,
+ defm PD : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f64_info, opcVec,
VecOpNode, prd, "{q}">,EVEX_CD8<64, CD8VF> , VEX_W;
defm SS : avx512_scalar_fpclass<opcScalar, OpcodeStr, ScalarOpNode,
f32x_info, prd>, EVEX_CD8<32, CD8VT1>;
@@ -2003,18 +1967,15 @@ defm VFPCLASS : avx512_fp_fpclass_all<"vfpclass", 0x66, 0x67, X86Vfpclass,
multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
string OpcodeStr, RegisterClass KRC,
ValueType vvt, X86MemOperand x86memop> {
- let hasSideEffects = 0 in {
- def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
- let mayLoad = 1 in
- def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set KRC:$dst, (vvt (load addr:$src)))]>;
- let mayStore = 1 in
- def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(store KRC:$src, addr:$dst)]>;
- }
+ let hasSideEffects = 0 in
+ def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set KRC:$dst, (vvt (load addr:$src)))]>;
+ def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(store KRC:$src, addr:$dst)]>;
}
multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
@@ -2043,9 +2004,6 @@ let Predicates = [HasBWI] in {
VEX, PD, VEX_W;
defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
VEX, XD;
-}
-
-let Predicates = [HasBWI] in {
defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>,
VEX, PS, VEX_W;
defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
@@ -2058,12 +2016,20 @@ let Predicates = [HasDQI] in {
(KMOVBkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit))>;
def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
(EXTRACT_SUBREG (KMOVBrk VK8:$src), sub_8bit)>;
+ def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
+ (KMOVBrk VK8:$src)>;
+ def : Pat<(i32 (anyext (i8 (bitconvert (v8i1 VK8:$src))))),
+ (KMOVBrk VK8:$src)>;
}
let Predicates = [HasAVX512] in {
def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
(KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
(EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
+ def : Pat<(i32 (zext (i16 (bitconvert (v16i1 VK16:$src))))),
+ (KMOVWrk VK16:$src)>;
+ def : Pat<(i32 (anyext (i16 (bitconvert (v16i1 VK16:$src))))),
+ (KMOVWrk VK16:$src)>;
}
let Predicates = [HasBWI] in {
def : Pat<(v32i1 (bitconvert (i32 GR32:$src))), (KMOVDkr GR32:$src)>;
@@ -2085,20 +2051,45 @@ let Predicates = [HasDQI] in {
(KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK4:$src, VK8))>;
def : Pat<(store VK2:$src, addr:$dst),
(KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK2:$src, VK8))>;
+ def : Pat<(store VK1:$src, addr:$dst),
+ (KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK8))>;
+
+ def : Pat<(v2i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (KMOVBkm addr:$src), VK2)>;
+ def : Pat<(v4i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (KMOVBkm addr:$src), VK4)>;
}
let Predicates = [HasAVX512, NoDQI] in {
- def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
- (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
- def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
- (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
+ def : Pat<(store VK1:$src, addr:$dst),
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)),
+ sub_8bit))>;
+ def : Pat<(store VK2:$src, addr:$dst),
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK2:$src, VK16)),
+ sub_8bit))>;
+ def : Pat<(store VK4:$src, addr:$dst),
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK4:$src, VK16)),
+ sub_8bit))>;
+ def : Pat<(store VK8:$src, addr:$dst),
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
+ sub_8bit))>;
+
+ def : Pat<(v8i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK8)>;
+ def : Pat<(v2i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK2)>;
+ def : Pat<(v4i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK4)>;
}
+
let Predicates = [HasAVX512] in {
def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
(KMOVWmk addr:$dst, VK16:$src)>;
def : Pat<(i1 (load addr:$src)),
- (COPY_TO_REGCLASS (AND16ri (i16 (SUBREG_TO_REG (i32 0),
- (MOV8rm addr:$src), sub_8bit)),
- (i16 1)), VK1)>;
+ (COPY_TO_REGCLASS (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), VK1)>;
def : Pat<(v16i1 (bitconvert (i16 (load addr:$src)))),
(KMOVWkm addr:$src)>;
}
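For orientation, a small C sketch of code that exercises the compare-to-mask and kmovw store/widen patterns above, assuming -mavx512f; the values are illustrative only:

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* An integer compare yields a 16-bit mask in a k register; storing or
     widening it goes through kmovw / a GPR move as in the patterns above. */
  __m512i a = _mm512_set1_epi32(1);
  __m512i b = _mm512_setzero_si512();
  __mmask16 k = _mm512_cmpeq_epi32_mask(a, b);   /* all bits clear here */
  uint16_t stored = (uint16_t)k;                 /* mask -> i16 bitconvert */
  uint32_t widened = (uint32_t)k;                /* zext of the mask value */
  printf("%u %u\n", stored, widened);
  return 0;
}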
@@ -2107,51 +2098,71 @@ let Predicates = [HasBWI] in {
(KMOVDmk addr:$dst, VK32:$src)>;
def : Pat<(v32i1 (bitconvert (i32 (load addr:$src)))),
(KMOVDkm addr:$src)>;
-}
-let Predicates = [HasBWI] in {
def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
(KMOVQmk addr:$dst, VK64:$src)>;
def : Pat<(v64i1 (bitconvert (i64 (load addr:$src)))),
(KMOVQkm addr:$src)>;
}
+def assertzext_i1 : PatFrag<(ops node:$src), (assertzext node:$src), [{
+ return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i1;
+}]>;
+
let Predicates = [HasAVX512] in {
def : Pat<(i1 (trunc (i64 GR64:$src))),
- (COPY_TO_REGCLASS (KMOVWkr (AND32ri (EXTRACT_SUBREG $src, sub_32bit),
- (i32 1))), VK1)>;
+ (COPY_TO_REGCLASS (i16 (EXTRACT_SUBREG (AND64ri8 $src, (i64 1)),
+ sub_16bit)), VK1)>;
+
+ def : Pat<(i1 (trunc (i64 (assertzext_i1 GR64:$src)))),
+ (COPY_TO_REGCLASS (i16 (EXTRACT_SUBREG $src, sub_16bit)), VK1)>;
def : Pat<(i1 (trunc (i32 GR32:$src))),
- (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
+ (COPY_TO_REGCLASS (i16 (EXTRACT_SUBREG (AND32ri8 $src, (i32 1)),
+ sub_16bit)), VK1)>;
+
+ def : Pat<(i1 (trunc (i32 (assertzext_i1 GR32:$src)))),
+ (COPY_TO_REGCLASS (i16 (EXTRACT_SUBREG $src, sub_16bit)), VK1)>;
def : Pat<(i1 (trunc (i8 GR8:$src))),
- (COPY_TO_REGCLASS
- (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
- VK1)>;
+ (COPY_TO_REGCLASS (i16 (SUBREG_TO_REG (i64 0), (AND8ri8 $src, (i8 1)),
+ sub_8bit)), VK1)>;
+
+ def : Pat<(i1 (trunc (i8 (assertzext_i1 GR8:$src)))),
+ (COPY_TO_REGCLASS (i16 (SUBREG_TO_REG (i64 0), $src, sub_8bit)), VK1)>;
+
def : Pat<(i1 (trunc (i16 GR16:$src))),
- (COPY_TO_REGCLASS
- (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
- VK1)>;
+ (COPY_TO_REGCLASS (AND16ri GR16:$src, (i16 1)), VK1)>;
+
+ def : Pat<(i1 (trunc (i16 (assertzext_i1 GR16:$src)))),
+ (COPY_TO_REGCLASS $src, VK1)>;
def : Pat<(i32 (zext VK1:$src)),
- (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
+ (i32 (SUBREG_TO_REG (i64 0), (i16 (COPY_TO_REGCLASS $src, GR16)),
+ sub_16bit))>;
+
def : Pat<(i32 (anyext VK1:$src)),
- (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16))>;
+ (i32 (SUBREG_TO_REG (i64 0), (i16 (COPY_TO_REGCLASS $src, GR16)),
+ sub_16bit))>;
def : Pat<(i8 (zext VK1:$src)),
- (EXTRACT_SUBREG
- (AND32ri (KMOVWrk
- (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
+ (i8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS VK1:$src, GR16)), sub_8bit))>;
+
def : Pat<(i8 (anyext VK1:$src)),
- (EXTRACT_SUBREG
- (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_8bit)>;
+ (i8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS $src, GR16)), sub_8bit))>;
def : Pat<(i64 (zext VK1:$src)),
- (AND64ri8 (SUBREG_TO_REG (i64 0),
- (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
+ (i64 (SUBREG_TO_REG (i64 0), (i16 (COPY_TO_REGCLASS $src, GR16)),
+ sub_16bit))>;
+
+ def : Pat<(i64 (anyext VK1:$src)),
+ (i64 (SUBREG_TO_REG (i64 0), (i16 (COPY_TO_REGCLASS $src, GR16)),
+ sub_16bit))>;
+
def : Pat<(i16 (zext VK1:$src)),
- (EXTRACT_SUBREG
- (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
- sub_16bit)>;
+ (COPY_TO_REGCLASS $src, GR16)>;
+
+ def : Pat<(i16 (anyext VK1:$src)),
+ (i16 (COPY_TO_REGCLASS $src, GR16))>;
}
def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
(COPY_TO_REGCLASS VK1:$src, VK16)>;
@@ -2166,17 +2177,24 @@ def : Pat<(v32i1 (scalar_to_vector VK1:$src)),
def : Pat<(v64i1 (scalar_to_vector VK1:$src)),
(COPY_TO_REGCLASS VK1:$src, VK64)>;
+def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
+def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
+def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
let Predicates = [HasAVX512, NoDQI] in {
// GR from/to 8-bit mask without native support
def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
(COPY_TO_REGCLASS
- (KMOVWkr (MOVZX32rr8 GR8 :$src)), VK8)>;
+ (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)), VK8)>;
def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
(EXTRACT_SUBREG
(KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
sub_8bit)>;
+ def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
+ (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16))>;
+ def : Pat<(i32 (anyext (i8 (bitconvert (v8i1 VK8:$src))))),
+ (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16))>;
}
let Predicates = [HasAVX512] in {
@@ -2419,7 +2437,6 @@ multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
let Predicates = [HasBWI] in {
defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
VEX, TAPD, VEX_W;
- let Predicates = [HasDQI] in
defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
VEX, TAPD;
}
@@ -2456,82 +2473,61 @@ let Predicates = [HasAVX512] in {
def : Pat<(i1 1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
}
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
- (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
-def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
- (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
+// Patterns for kmask insert_subvector/extract_subvector to/from index=0
+multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
+ RegisterClass RC, ValueType VT> {
+ def : Pat<(subVT (extract_subvector (VT RC:$src), (iPTR 0))),
+ (subVT (COPY_TO_REGCLASS RC:$src, subRC))>;
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
- (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
+ def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
+ (VT (COPY_TO_REGCLASS subRC:$src, RC))>;
+}
-def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
- (v16i1 (COPY_TO_REGCLASS VK32:$src, VK16))>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK4, v4i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK8, v8i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK64, v64i1>;
-def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
- (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK8, v8i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK64, v64i1>;
-def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK64:$src, VK32))>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK64, v64i1>;
-def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
- (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK64, v64i1>;
-def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
- (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
-
-def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
- (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
-
-def : Pat<(v4i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
- (v4i1 (COPY_TO_REGCLASS VK2:$src, VK4))>;
-
-def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
- (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
-def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
- (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
-
-def : Pat<(v32i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK2:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK4:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK8:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK16:$src, VK32))>;
-
-def : Pat<(v64i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK2:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK4:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK8:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK16:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK32:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK32:$src, VK64))>;
+defm : operation_subvector_mask_lowering<VK32, v32i1, VK64, v64i1>;
+def : Pat<(v2i1 (extract_subvector (v4i1 VK4:$src), (iPTR 2))),
+ (v2i1 (COPY_TO_REGCLASS
+ (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16), (i8 2)),
+ VK2))>;
+def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 4))),
+ (v4i1 (COPY_TO_REGCLASS
+ (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (i8 4)),
+ VK4))>;
+def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
+ (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
+def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
+ (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
+def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
+ (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
(v8i1 (COPY_TO_REGCLASS
(KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
(I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
- (v8i1 (COPY_TO_REGCLASS
- (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16),
- (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-
def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))),
(v4i1 (COPY_TO_REGCLASS
(KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16),
(I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
-def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))),
- (v4i1 (COPY_TO_REGCLASS
- (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16),
- (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
//===----------------------------------------------------------------------===//
// AVX-512 - Aligned and unaligned load and store
//
@@ -2539,7 +2535,8 @@ def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))),
multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
PatFrag ld_frag, PatFrag mload,
- bit IsReMaterializable = 1> {
+ bit IsReMaterializable = 1,
+ SDPatternOperator SelectOprr = vselect> {
let hasSideEffects = 0 in {
def rr : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst), (ins _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
@@ -2547,7 +2544,10 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
def rrkz : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
- "${dst} {${mask}} {z}, $src}"), [], _.ExeDomain>,
+ "${dst} {${mask}} {z}, $src}"),
+ [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
+ (_.VT _.RC:$src),
+ _.ImmAllZerosV)))], _.ExeDomain>,
EVEX, EVEX_KZ;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable,
@@ -2562,11 +2562,11 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1),
!strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
"${dst} {${mask}}, $src1}"),
- [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
+ [(set _.RC:$dst, (_.VT (SelectOprr _.KRCWM:$mask,
(_.VT _.RC:$src1),
(_.VT _.RC:$src0))))], _.ExeDomain>,
EVEX, EVEX_K;
- let mayLoad = 1, SchedRW = [WriteLoad] in
+ let SchedRW = [WriteLoad] in
def rmk : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src0, _.KRCWM:$mask, _.MemOp:$src1),
!strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
@@ -2576,7 +2576,7 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(_.VT (bitconvert (ld_frag addr:$src1))),
(_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K;
}
- let mayLoad = 1, SchedRW = [WriteLoad] in
+ let SchedRW = [WriteLoad] in
def rmkz : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.MemOp:$src),
OpcodeStr #"\t{$src, ${dst} {${mask}} {z}|"#
@@ -2615,22 +2615,27 @@ multiclass avx512_alignedload_vl<bits<8> opc, string OpcodeStr,
multiclass avx512_load_vl<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo _,
Predicate prd,
- bit IsReMaterializable = 1> {
+ bit IsReMaterializable = 1,
+ SDPatternOperator SelectOprr = vselect> {
let Predicates = [prd] in
defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.LdFrag,
- masked_load_unaligned, IsReMaterializable>, EVEX_V512;
+ masked_load_unaligned, IsReMaterializable,
+ SelectOprr>, EVEX_V512;
let Predicates = [prd, HasVLX] in {
defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.LdFrag,
- masked_load_unaligned, IsReMaterializable>, EVEX_V256;
+ masked_load_unaligned, IsReMaterializable,
+ SelectOprr>, EVEX_V256;
defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.LdFrag,
- masked_load_unaligned, IsReMaterializable>, EVEX_V128;
+ masked_load_unaligned, IsReMaterializable,
+ SelectOprr>, EVEX_V128;
}
}
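A minimal sketch of the merge- and zero-masked loads that the vselect/SelectOprr load patterns describe, assuming -mavx512f; buffer contents and mask value are illustrative only:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  /* Masked loads: the merge form keeps src elements where the mask bit is
     clear, the zero form writes zeros there; these map onto the rmk / rmkz
     load variants above. */
  float buf[16];
  for (int i = 0; i < 16; ++i) buf[i] = (float)i;
  __mmask16 k = 0x00FF;                                   /* low 8 lanes */
  __m512 merged = _mm512_mask_loadu_ps(_mm512_set1_ps(-1.0f), k, buf);
  __m512 zeroed = _mm512_maskz_loadu_ps(k, buf);
  float out[16];
  _mm512_storeu_ps(out, merged);
  printf("%f %f\n", out[0], out[15]);
  _mm512_storeu_ps(out, zeroed);
  printf("%f %f\n", out[0], out[15]);
  return 0;
}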
multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
PatFrag st_frag, PatFrag mstore> {
+ let hasSideEffects = 0 in {
def rr_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), (ins _.RC:$src),
OpcodeStr # ".s\t{$src, $dst|$dst, $src}",
[], _.ExeDomain>, EVEX;
@@ -2644,8 +2649,8 @@ multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
OpcodeStr # ".s\t{$src, ${dst} {${mask}} {z}|" #
"${dst} {${mask}} {z}, $src}",
[], _.ExeDomain>, EVEX, EVEX_KZ;
+ }
- let mayStore = 1 in {
def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(st_frag (_.VT _.RC:$src), addr:$dst)], _.ExeDomain>, EVEX;
@@ -2653,7 +2658,6 @@ multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
OpcodeStr # "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}",
[], _.ExeDomain>, EVEX, EVEX_K;
- }
def: Pat<(mstore addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src)),
(!cast<Instruction>(NAME#_.ZSuffix##mrk) addr:$ptr,
@@ -2699,32 +2703,16 @@ defm VMOVAPD : avx512_alignedload_vl<0x28, "vmovapd", avx512vl_f64_info,
avx512_alignedstore_vl<0x29, "vmovapd", avx512vl_f64_info,
HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512>,
+defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512,
+ 1, null_frag>,
avx512_store_vl<0x11, "vmovups", avx512vl_f32_info, HasAVX512>,
PS, EVEX_CD8<32, CD8VF>;
-defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512, 0>,
+defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512, 0,
+ null_frag>,
avx512_store_vl<0x11, "vmovupd", avx512vl_f64_info, HasAVX512>,
PD, VEX_W, EVEX_CD8<64, CD8VF>;
-def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
- GR16:$mask),
- (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
- VR512:$src)>;
-def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
- GR8:$mask),
- (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
- VR512:$src)>;
-
-def: Pat<(int_x86_avx512_mask_store_ps_512 addr:$ptr, (v16f32 VR512:$src),
- GR16:$mask),
- (VMOVAPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
- VR512:$src)>;
-def: Pat<(int_x86_avx512_mask_store_pd_512 addr:$ptr, (v8f64 VR512:$src),
- GR8:$mask),
- (VMOVAPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
- VR512:$src)>;
-
defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info,
HasAVX512>,
avx512_alignedstore_vl<0x7F, "vmovdqa32", avx512vl_i32_info,
@@ -2743,50 +2731,159 @@ defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", avx512vl_i16_info, HasBWI>,
avx512_store_vl<0x7F, "vmovdqu16", avx512vl_i16_info,
HasBWI>, XD, VEX_W, EVEX_CD8<16, CD8VF>;
-defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512>,
+defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512,
+ 1, null_frag>,
avx512_store_vl<0x7F, "vmovdqu32", avx512vl_i32_info,
HasAVX512>, XS, EVEX_CD8<32, CD8VF>;
-defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512>,
+defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512,
+ 1, null_frag>,
avx512_store_vl<0x7F, "vmovdqu64", avx512vl_i64_info,
HasAVX512>, XS, VEX_W, EVEX_CD8<64, CD8VF>;
-def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
- (v16i32 immAllZerosV), GR16:$mask)),
- (VMOVDQU32Zrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
-
-def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
- (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
- (VMOVDQU64Zrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
-
-def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
- GR16:$mask),
- (VMOVDQU32Zmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
- VR512:$src)>;
-def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
- GR8:$mask),
- (VMOVDQU64Zmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
- VR512:$src)>;
-
-let AddedComplexity = 20 in {
-def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
- (bc_v8i64 (v16i32 immAllZerosV)))),
- (VMOVDQU64Zrrkz VK8WM:$mask, VR512:$src)>;
-
def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
(v8i64 VR512:$src))),
- (VMOVDQU64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
+ (VMOVDQA64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
VK8), VR512:$src)>;
-def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
- (v16i32 immAllZerosV))),
- (VMOVDQU32Zrrkz VK16WM:$mask, VR512:$src)>;
-
def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
(v16i32 VR512:$src))),
- (VMOVDQU32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
+ (VMOVDQA32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
+
+// These patterns exist to prevent the above patterns from introducing a second
+// mask inversion when one already exists.
+def : Pat<(v8i64 (vselect (xor VK8:$mask, (v8i1 immAllOnesV)),
+ (bc_v8i64 (v16i32 immAllZerosV)),
+ (v8i64 VR512:$src))),
+ (VMOVDQA64Zrrkz VK8:$mask, VR512:$src)>;
+def : Pat<(v16i32 (vselect (xor VK16:$mask, (v16i1 immAllOnesV)),
+ (v16i32 immAllZerosV),
+ (v16i32 VR512:$src))),
+ (VMOVDQA32Zrrkz VK16WM:$mask, VR512:$src)>;
+
+let Predicates = [HasVLX] in {
+ // Special patterns for storing subvector extracts of lower 128-bits of 256.
+ // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
+ def : Pat<(alignedstore (v2f64 (extract_subvector
+ (v4f64 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4f32 (extract_subvector
+ (v8f32 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v2i64 (extract_subvector
+ (v4i64 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4i32 (extract_subvector
+ (v8i32 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v8i16 (extract_subvector
+ (v16i16 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v16i8 (extract_subvector
+ (v32i8 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+
+ def : Pat<(store (v2f64 (extract_subvector
+ (v4f64 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v4f32 (extract_subvector
+ (v8f32 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v2i64 (extract_subvector
+ (v4i64 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v4i32 (extract_subvector
+ (v8i32 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v8i16 (extract_subvector
+ (v16i16 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v16i8 (extract_subvector
+ (v32i8 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+
+ // Special patterns for storing subvector extracts of lower 128-bits of 512.
+ // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
+ def : Pat<(alignedstore (v2f64 (extract_subvector
+ (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4f32 (extract_subvector
+ (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v2i64 (extract_subvector
+ (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4i32 (extract_subvector
+ (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v8i16 (extract_subvector
+ (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v16i8 (extract_subvector
+ (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+
+ def : Pat<(store (v2f64 (extract_subvector
+ (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v4f32 (extract_subvector
+ (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v2i64 (extract_subvector
+ (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v4i32 (extract_subvector
+ (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v8i16 (extract_subvector
+ (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v16i8 (extract_subvector
+ (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+
+ // Special patterns for storing subvector extracts of lower 256-bits of 512.
+ // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
+ def : Pat<(alignedstore (v4f64 (extract_subvector
+ (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPDZ256mr addr:$dst, (v4f64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v8f32 (extract_subvector
+ (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSZ256mr addr:$dst, (v8f32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v4i64 (extract_subvector
+ (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA64Z256mr addr:$dst, (v4i64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v8i32 (extract_subvector
+ (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z256mr addr:$dst, (v8i32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v16i16 (extract_subvector
+ (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z256mr addr:$dst, (v16i16 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v32i8 (extract_subvector
+ (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+
+ def : Pat<(store (v4f64 (extract_subvector
+ (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPDZ256mr addr:$dst, (v4f64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v8f32 (extract_subvector
+ (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPSZ256mr addr:$dst, (v8f32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v4i64 (extract_subvector
+ (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU64Z256mr addr:$dst, (v4i64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v8i32 (extract_subvector
+ (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z256mr addr:$dst, (v8i32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v16i16 (extract_subvector
+ (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z256mr addr:$dst, (v16i16 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v32i8 (extract_subvector
+ (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
}
+
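A short sketch of the source-level shape these subvector-extract store patterns target, assuming -mavx512f (and -mavx512vl for the 256-bit forms); the helper name store_low4 is made up for illustration:

#include <immintrin.h>
#include <stdio.h>

static void store_low4(float *dst, __m512 v) {
  /* Storing the low 128 bits of a 512-bit value: with the patterns above,
     this can lower to a plain xmm vmovups rather than an explicit extract. */
  _mm_storeu_ps(dst, _mm512_castps512_ps128(v));
}

int main(void) {
  float out[4];
  store_low4(out, _mm512_set1_ps(3.0f));
  printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
  return 0;
}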
// Move Int Doubleword to Packed Double Int
//
def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
@@ -2910,45 +3007,43 @@ def VMOVQI2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
// AVX-512 MOVSS, MOVSD
//===----------------------------------------------------------------------===//
-multiclass avx512_move_scalar <string asm, SDNode OpNode,
+multiclass avx512_move_scalar <string asm, SDNode OpNode,
X86VectorVTInfo _> {
- defm rr_Int : AVX512_maskable_scalar<0x10, MRMSrcReg, _, (outs _.RC:$dst),
+ defm rr_Int : AVX512_maskable_scalar<0x10, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2),
- asm, "$src2, $src1","$src1, $src2",
+ asm, "$src2, $src1","$src1, $src2",
(_.VT (OpNode (_.VT _.RC:$src1),
(_.VT _.RC:$src2))),
IIC_SSE_MOV_S_RR>, EVEX_4V;
- let Constraints = "$src1 = $dst" , mayLoad = 1 in
+ let Constraints = "$src1 = $dst" in
defm rm_Int : AVX512_maskable_3src_scalar<0x10, MRMSrcMem, _,
- (outs _.RC:$dst),
+ (outs _.RC:$dst),
(ins _.ScalarMemOp:$src),
asm,"$src","$src",
- (_.VT (OpNode (_.VT _.RC:$src1),
- (_.VT (scalar_to_vector
+ (_.VT (OpNode (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector
(_.ScalarLdFrag addr:$src)))))>, EVEX;
let isCodeGenOnly = 1 in {
- def rr : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
+ def rr : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
(ins _.RC:$src1, _.FRC:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst, (_.VT (OpNode _.RC:$src1,
(scalar_to_vector _.FRC:$src2))))],
_.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V;
- let mayLoad = 1 in
def rm : AVX512PI<0x10, MRMSrcMem, (outs _.FRC:$dst), (ins _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set _.FRC:$dst, (_.ScalarLdFrag addr:$src))],
_.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX;
}
- let mayStore = 1 in {
- def mr: AVX512PI<0x11, MRMDestMem, (outs), (ins _.ScalarMemOp:$dst, _.FRC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(store _.FRC:$src, addr:$dst)], _.ExeDomain, IIC_SSE_MOV_S_MR>,
- EVEX;
- def mrk: AVX512PI<0x11, MRMDestMem, (outs),
- (ins _.ScalarMemOp:$dst, VK1WM:$mask, _.FRC:$src),
- !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
- [], _.ExeDomain, IIC_SSE_MOV_S_MR>, EVEX, EVEX_K;
- } // mayStore
+ def mr: AVX512PI<0x11, MRMDestMem, (outs), (ins _.ScalarMemOp:$dst, _.FRC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(store _.FRC:$src, addr:$dst)], _.ExeDomain, IIC_SSE_MOV_S_MR>,
+ EVEX;
+ let mayStore = 1 in
+ def mrk: AVX512PI<0x11, MRMDestMem, (outs),
+ (ins _.ScalarMemOp:$dst, VK1WM:$mask, _.FRC:$src),
+ !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
+ [], _.ExeDomain, IIC_SSE_MOV_S_MR>, EVEX, EVEX_K;
}
defm VMOVSSZ : avx512_move_scalar<"vmovss", X86Movss, f32x_info>,
@@ -2957,11 +3052,11 @@ defm VMOVSSZ : avx512_move_scalar<"vmovss", X86Movss, f32x_info>,
defm VMOVSDZ : avx512_move_scalar<"vmovsd", X86Movsd, f64x_info>,
VEX_LIG, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
-def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
+def : Pat<(f32 (X86selects VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
(COPY_TO_REGCLASS (VMOVSSZrr_Intk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
VK1WM:$mask, (v4f32 (IMPLICIT_DEF)),(COPY_TO_REGCLASS FR32X:$src1, VR128X)), FR32X)>;
-def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
+def : Pat<(f64 (X86selects VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
(COPY_TO_REGCLASS (VMOVSDZrr_Intk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
VK1WM:$mask, (v2f64 (IMPLICIT_DEF)), (COPY_TO_REGCLASS FR64X:$src1, VR128X)), FR64X)>;
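A hedged sketch of the masked scalar select that the X86selects patterns above describe, assuming -mavx512f; the values are illustrative only:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  /* Masked scalar move: bit 0 of the mask selects the low element, which is
     the select the vmovss/vmovsd patterns above express. */
  __m128 src = _mm_set_ss(7.0f);
  __m128 a   = _mm_set_ss(1.0f);
  __m128 b   = _mm_set_ss(2.0f);
  __m128 r0  = _mm_mask_move_ss(src, 0, a, b);   /* mask clear -> 7.0 */
  __m128 r1  = _mm_mask_move_ss(src, 1, a, b);   /* mask set   -> 2.0 */
  printf("%f %f\n", _mm_cvtss_f32(r0), _mm_cvtss_f32(r1));
  return 0;
}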
@@ -2969,11 +3064,13 @@ def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
(VMOVSSZmrk addr:$dst, (i1 (COPY_TO_REGCLASS GR8:$mask, VK1WM)),
(COPY_TO_REGCLASS VR128X:$src, FR32X))>;
+let hasSideEffects = 0 in
defm VMOVSSZrr_REV : AVX512_maskable_in_asm<0x11, MRMDestReg, f32x_info,
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2),
"vmovss.s", "$src2, $src1", "$src1, $src2", []>,
XS, EVEX_4V, VEX_LIG;
+let hasSideEffects = 0 in
defm VMOVSSDrr_REV : AVX512_maskable_in_asm<0x11, MRMDestReg, f64x_info,
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2),
"vmovsd.s", "$src2, $src1", "$src1, $src2", []>,
@@ -3037,6 +3134,22 @@ let Predicates = [HasAVX512] in {
def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
(v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
+ def : Pat<(v4f64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
+
+ // Represent the same patterns above but in the form they appear for
+ // 512-bit types
+ def : Pat<(v16i32 (X86vzmovl (insert_subvector undef,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
+ def : Pat<(v16f32 (X86vzmovl (insert_subvector undef,
+ (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
+ def : Pat<(v8f64 (X86vzmovl (insert_subvector undef,
+ (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
+ def : Pat<(v8f64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
}
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
(v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
@@ -3064,9 +3177,6 @@ let Predicates = [HasAVX512] in {
def : Pat<(store (f32 (extractelt (v4f32 VR128X:$src), (iPTR 0))),
addr:$dst),
(VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
- def : Pat<(store (f64 (extractelt (v2f64 VR128X:$src), (iPTR 0))),
- addr:$dst),
- (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
// Shuffle with VMOVSS
def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
@@ -3138,14 +3248,21 @@ def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
EVEX_CD8<8, CD8VT8>;
let Predicates = [HasAVX512] in {
+ let AddedComplexity = 15 in {
+ def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
+ (VMOVDI2PDIZrr GR32:$src)>;
+
+ def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
+ (VMOV64toPQIZrr GR64:$src)>;
+
+ def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
+ (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
+ (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
+ }
// AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
let AddedComplexity = 20 in {
def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
(VMOVDI2PDIZrm addr:$src)>;
- def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
- (VMOV64toPQIZrr GR64:$src)>;
- def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
- (VMOVDI2PDIZrr GR32:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
(VMOVDI2PDIZrm addr:$src)>;
@@ -3157,15 +3274,18 @@ let Predicates = [HasAVX512] in {
(VMOVZPQILo2PQIZrr VR128X:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)),
(VMOVZPQILo2PQIZrm addr:$src)>;
+ def : Pat<(v4i64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i64 0), (VMOVZPQILo2PQIZrm addr:$src), sub_xmm)>;
}
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
- def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
- (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
- (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
+
+ // Use regular 128-bit instructions to match 512-bit scalar_to_vec+zext.
+ def : Pat<(v8i64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i64 0), (VMOVZPQILo2PQIZrm addr:$src), sub_xmm)>;
}
def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
@@ -3190,66 +3310,112 @@ let SchedRW = [WriteLoad] in {
SSEPackedInt>, EVEX, T8PD, EVEX_V512,
EVEX_CD8<64, CD8VF>;
- let Predicates = [HasAVX512, HasVLX] in {
+ let Predicates = [HasVLX] in {
def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
- (ins i256mem:$src),
- "vmovntdqa\t{$src, $dst|$dst, $src}", [],
- SSEPackedInt>, EVEX, T8PD, EVEX_V256,
- EVEX_CD8<64, CD8VF>;
+ (ins i256mem:$src),
+ "vmovntdqa\t{$src, $dst|$dst, $src}",
+ [(set VR256X:$dst, (int_x86_avx2_movntdqa addr:$src))],
+ SSEPackedInt>, EVEX, T8PD, EVEX_V256,
+ EVEX_CD8<64, CD8VF>;
def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
- (ins i128mem:$src),
- "vmovntdqa\t{$src, $dst|$dst, $src}", [],
- SSEPackedInt>, EVEX, T8PD, EVEX_V128,
- EVEX_CD8<64, CD8VF>;
+ (ins i128mem:$src),
+ "vmovntdqa\t{$src, $dst|$dst, $src}",
+ [(set VR128X:$dst, (int_x86_sse41_movntdqa addr:$src))],
+ SSEPackedInt>, EVEX, T8PD, EVEX_V128,
+ EVEX_CD8<64, CD8VF>;
}
}
-multiclass avx512_movnt<bits<8> opc, string OpcodeStr, PatFrag st_frag,
- ValueType OpVT, RegisterClass RC, X86MemOperand memop,
- Domain d, InstrItinClass itin = IIC_SSE_MOVNT> {
- let SchedRW = [WriteStore], mayStore = 1,
- AddedComplexity = 400 in
- def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
+multiclass avx512_movnt<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
+ PatFrag st_frag = alignednontemporalstore,
+ InstrItinClass itin = IIC_SSE_MOVNT> {
+ let SchedRW = [WriteStore], AddedComplexity = 400 in
+ def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(st_frag (OpVT RC:$src), addr:$dst)], d, itin>, EVEX;
+ [(st_frag (_.VT _.RC:$src), addr:$dst)],
+ _.ExeDomain, itin>, EVEX, EVEX_CD8<_.EltSize, CD8VF>;
}
-multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr, PatFrag st_frag,
- string elty, string elsz, string vsz512,
- string vsz256, string vsz128, Domain d,
- Predicate prd, InstrItinClass itin = IIC_SSE_MOVNT> {
- let Predicates = [prd] in
- defm Z : avx512_movnt<opc, OpcodeStr, st_frag,
- !cast<ValueType>("v"##vsz512##elty##elsz), VR512,
- !cast<X86MemOperand>(elty##"512mem"), d, itin>,
- EVEX_V512;
-
- let Predicates = [prd, HasVLX] in {
- defm Z256 : avx512_movnt<opc, OpcodeStr, st_frag,
- !cast<ValueType>("v"##vsz256##elty##elsz), VR256X,
- !cast<X86MemOperand>(elty##"256mem"), d, itin>,
- EVEX_V256;
+multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr,
+ AVX512VLVectorVTInfo VTInfo> {
+ let Predicates = [HasAVX512] in
+ defm Z : avx512_movnt<opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
- defm Z128 : avx512_movnt<opc, OpcodeStr, st_frag,
- !cast<ValueType>("v"##vsz128##elty##elsz), VR128X,
- !cast<X86MemOperand>(elty##"128mem"), d, itin>,
- EVEX_V128;
- }
+ let Predicates = [HasAVX512, HasVLX] in {
+ defm Z256 : avx512_movnt<opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
+ defm Z128 : avx512_movnt<opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
+ }
+}
+
+defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", avx512vl_i64_info>, PD;
+defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", avx512vl_f64_info>, PD, VEX_W;
+defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", avx512vl_f32_info>, PS;
+
+let Predicates = [HasAVX512], AddedComplexity = 400 in {
+ def : Pat<(alignednontemporalstore (v16i32 VR512:$src), addr:$dst),
+ (VMOVNTDQZmr addr:$dst, VR512:$src)>;
+ def : Pat<(alignednontemporalstore (v32i16 VR512:$src), addr:$dst),
+ (VMOVNTDQZmr addr:$dst, VR512:$src)>;
+ def : Pat<(alignednontemporalstore (v64i8 VR512:$src), addr:$dst),
+ (VMOVNTDQZmr addr:$dst, VR512:$src)>;
+
+ def : Pat<(v8f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v16f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v8i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v16i32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v32i16 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v64i8 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+}
+
+let Predicates = [HasVLX], AddedComplexity = 400 in {
+ def : Pat<(alignednontemporalstore (v8i32 VR256X:$src), addr:$dst),
+ (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
+ def : Pat<(alignednontemporalstore (v16i16 VR256X:$src), addr:$dst),
+ (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
+ def : Pat<(alignednontemporalstore (v32i8 VR256X:$src), addr:$dst),
+ (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
+
+ def : Pat<(v4f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v8f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v4i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v8i32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v16i16 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v32i8 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+
+ def : Pat<(alignednontemporalstore (v4i32 VR128X:$src), addr:$dst),
+ (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
+ def : Pat<(alignednontemporalstore (v8i16 VR128X:$src), addr:$dst),
+ (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
+ def : Pat<(alignednontemporalstore (v16i8 VR128X:$src), addr:$dst),
+ (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
+
+ def : Pat<(v2f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v4f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v2i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v4i32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v8i16 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v16i8 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
}
-defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", alignednontemporalstore,
- "i", "64", "8", "4", "2", SSEPackedInt,
- HasAVX512>, PD, EVEX_CD8<64, CD8VF>;
-
-defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", alignednontemporalstore,
- "f", "64", "8", "4", "2", SSEPackedDouble,
- HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
-
-defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", alignednontemporalstore,
- "f", "32", "16", "8", "4", SSEPackedSingle,
- HasAVX512>, PS, EVEX_CD8<32, CD8VF>;
-
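A small C sketch of the non-temporal store/load pairing covered by the vmovntdq/vmovntdqa patterns above, assuming a GCC/Clang-style toolchain with -mavx512f; the aligned(64) attribute and sfence usage are part of the sketch, not the patch:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  /* Non-temporal 512-bit store and load; with 64-byte alignment these can
     select vmovntdq / vmovntdqa as in the patterns above. */
  __attribute__((aligned(64))) int buf[16];
  _mm512_stream_si512((__m512i *)buf, _mm512_set1_epi32(42));
  _mm_sfence();                       /* order NT stores before the reload */
  __m512i v = _mm512_stream_load_si512((__m512i *)buf);
  int out[16];
  _mm512_storeu_si512((__m512i *)out, v);
  printf("%d %d\n", out[0], out[15]);
  return 0;
}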
//===----------------------------------------------------------------------===//
// AVX-512 - Integer arithmetic
//
@@ -3263,30 +3429,28 @@ multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
itins.rr, IsCommutable>,
AVX512BIBase, EVEX_4V;
- let mayLoad = 1 in
- defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (_.VT (OpNode _.RC:$src1,
- (bitconvert (_.LdFrag addr:$src2)))),
- itins.rm>,
- AVX512BIBase, EVEX_4V;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_.VT (OpNode _.RC:$src1,
+ (bitconvert (_.LdFrag addr:$src2)))),
+ itins.rm>,
+ AVX512BIBase, EVEX_4V;
}
multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _, OpndItins itins,
bit IsCommutable = 0> :
avx512_binop_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
- let mayLoad = 1 in
- defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (_.VT (OpNode _.RC:$src1,
- (X86VBroadcast
- (_.ScalarLdFrag addr:$src2)))),
- itins.rm>,
- AVX512BIBase, EVEX_4V, EVEX_B;
+ defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (_.VT (OpNode _.RC:$src1,
+ (X86VBroadcast
+ (_.ScalarLdFrag addr:$src2)))),
+ itins.rm>,
+ AVX512BIBase, EVEX_4V, EVEX_B;
}
multiclass avx512_binop_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -3380,7 +3544,8 @@ multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, OpndItins itins,
SDNode OpNode,X86VectorVTInfo _Src,
- X86VectorVTInfo _Dst, bit IsCommutable = 0> {
+ X86VectorVTInfo _Dst, X86VectorVTInfo _Brdct,
+ bit IsCommutable = 0> {
defm rr : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
(ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
"$src2, $src1","$src1, $src2",
@@ -3389,26 +3554,24 @@ multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, OpndItins itins,
(_Src.VT _Src.RC:$src2))),
itins.rr, IsCommutable>,
AVX512BIBase, EVEX_4V;
- let mayLoad = 1 in {
- defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
- (bitconvert (_Src.LdFrag addr:$src2)))),
- itins.rm>,
- AVX512BIBase, EVEX_4V;
-
- defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src1, _Dst.ScalarMemOp:$src2),
- OpcodeStr,
- "${src2}"##_Dst.BroadcastStr##", $src1",
- "$src1, ${src2}"##_Dst.BroadcastStr,
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
- (_Dst.VT (X86VBroadcast
- (_Dst.ScalarLdFrag addr:$src2)))))),
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
+ (bitconvert (_Src.LdFrag addr:$src2)))),
itins.rm>,
- AVX512BIBase, EVEX_4V, EVEX_B;
- }
+ AVX512BIBase, EVEX_4V;
+
+ defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Dst.ScalarMemOp:$src2),
+ OpcodeStr,
+ "${src2}"##_Brdct.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_Dst.BroadcastStr,
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
+ (_Brdct.VT (X86VBroadcast
+ (_Brdct.ScalarLdFrag addr:$src2)))))),
+ itins.rm>,
+ AVX512BIBase, EVEX_4V, EVEX_B;
}
defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
@@ -3439,39 +3602,46 @@ defm VPAVG : avx512_binop_rm_vl_bw<0xE0, 0xE3, "vpavg", X86avg,
SSE_INTALU_ITINS_P, HasBWI, 1>;
multiclass avx512_binop_all<bits<8> opc, string OpcodeStr, OpndItins itins,
- SDNode OpNode, bit IsCommutable = 0> {
-
- defm NAME#Z : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
- v16i32_info, v8i64_info, IsCommutable>,
- EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
- let Predicates = [HasVLX] in {
+ AVX512VLVectorVTInfo _SrcVTInfo, AVX512VLVectorVTInfo _DstVTInfo,
+ SDNode OpNode, Predicate prd, bit IsCommutable = 0> {
+ let Predicates = [prd] in
+ defm NAME#Z : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
+ _SrcVTInfo.info512, _DstVTInfo.info512,
+ v8i64_info, IsCommutable>,
+ EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
+ let Predicates = [HasVLX, prd] in {
defm NAME#Z256 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
- v8i32x_info, v4i64x_info, IsCommutable>,
- EVEX_V256, EVEX_CD8<64, CD8VF>, VEX_W;
+ _SrcVTInfo.info256, _DstVTInfo.info256,
+ v4i64x_info, IsCommutable>,
+ EVEX_V256, EVEX_CD8<64, CD8VF>, VEX_W;
defm NAME#Z128 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
- v4i32x_info, v2i64x_info, IsCommutable>,
+ _SrcVTInfo.info128, _DstVTInfo.info128,
+ v2i64x_info, IsCommutable>,
EVEX_V128, EVEX_CD8<64, CD8VF>, VEX_W;
}
}
defm VPMULDQ : avx512_binop_all<0x28, "vpmuldq", SSE_INTALU_ITINS_P,
- X86pmuldq, 1>,T8PD;
+ avx512vl_i32_info, avx512vl_i64_info,
+ X86pmuldq, HasAVX512, 1>,T8PD;
defm VPMULUDQ : avx512_binop_all<0xF4, "vpmuludq", SSE_INTMUL_ITINS_P,
- X86pmuludq, 1>;
+ avx512vl_i32_info, avx512vl_i64_info,
+ X86pmuludq, HasAVX512, 1>;
+defm VPMULTISHIFTQB : avx512_binop_all<0x83, "vpmultishiftqb", SSE_INTALU_ITINS_P,
+ avx512vl_i8_info, avx512vl_i8_info,
+ X86multishift, HasVBMI, 0>, T8PD;
multiclass avx512_packs_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _Src, X86VectorVTInfo _Dst> {
- let mayLoad = 1 in {
- defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2),
- OpcodeStr,
- "${src2}"##_Src.BroadcastStr##", $src1",
- "$src1, ${src2}"##_Src.BroadcastStr,
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
- (_Src.VT (X86VBroadcast
- (_Src.ScalarLdFrag addr:$src2))))))>,
- EVEX_4V, EVEX_B, EVEX_CD8<_Src.EltSize, CD8VF>;
- }
+ defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2),
+ OpcodeStr,
+ "${src2}"##_Src.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_Src.BroadcastStr,
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
+ (_Src.VT (X86VBroadcast
+ (_Src.ScalarLdFrag addr:$src2))))))>,
+ EVEX_4V, EVEX_B, EVEX_CD8<_Src.EltSize, CD8VF>;
}
multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr,
@@ -3484,23 +3654,22 @@ multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr,
(_Src.VT _Src.RC:$src1),
(_Src.VT _Src.RC:$src2)))>,
EVEX_CD8<_Src.EltSize, CD8VF>, EVEX_4V;
- let mayLoad = 1 in {
- defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
- (bitconvert (_Src.LdFrag addr:$src2))))>,
- EVEX_4V, EVEX_CD8<_Src.EltSize, CD8VF>;
- }
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
+ (bitconvert (_Src.LdFrag addr:$src2))))>,
+ EVEX_4V, EVEX_CD8<_Src.EltSize, CD8VF>;
}
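// Note: the explicit "let mayLoad = 1" wrappers are dropped here and in the
// surrounding multiclasses; TableGen infers mayLoad from the load node in
// each instruction's pattern, so stating it again was redundant.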
multiclass avx512_packs_all_i32_i16<bits<8> opc, string OpcodeStr,
SDNode OpNode> {
+ let Predicates = [HasBWI] in
defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i32_info,
v32i16_info>,
avx512_packs_rmb<opc, OpcodeStr, OpNode, v16i32_info,
v32i16_info>, EVEX_V512;
- let Predicates = [HasVLX] in {
+ let Predicates = [HasBWI, HasVLX] in {
defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i32x_info,
v16i16x_info>,
avx512_packs_rmb<opc, OpcodeStr, OpNode, v8i32x_info,
@@ -3513,9 +3682,10 @@ multiclass avx512_packs_all_i32_i16<bits<8> opc, string OpcodeStr,
}
multiclass avx512_packs_all_i16_i8<bits<8> opc, string OpcodeStr,
SDNode OpNode> {
+ let Predicates = [HasBWI] in
defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v32i16_info,
v64i8_info>, EVEX_V512;
- let Predicates = [HasVLX] in {
+ let Predicates = [HasBWI, HasVLX] in {
defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i16x_info,
v32i8x_info>, EVEX_V256;
defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i16x_info,
@@ -3526,9 +3696,10 @@ multiclass avx512_packs_all_i16_i8<bits<8> opc, string OpcodeStr,
multiclass avx512_vpmadd<bits<8> opc, string OpcodeStr,
SDNode OpNode, AVX512VLVectorVTInfo _Src,
AVX512VLVectorVTInfo _Dst> {
+ let Predicates = [HasBWI] in
defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info512,
_Dst.info512>, EVEX_V512;
- let Predicates = [HasVLX] in {
+ let Predicates = [HasBWI, HasVLX] in {
defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info256,
_Dst.info256>, EVEX_V256;
defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info128,
@@ -3536,17 +3707,15 @@ multiclass avx512_vpmadd<bits<8> opc, string OpcodeStr,
}
}
-let Predicates = [HasBWI] in {
- defm VPACKSSDW : avx512_packs_all_i32_i16<0x6B, "vpackssdw", X86Packss>, PD;
- defm VPACKUSDW : avx512_packs_all_i32_i16<0x2b, "vpackusdw", X86Packus>, T8PD;
- defm VPACKSSWB : avx512_packs_all_i16_i8 <0x63, "vpacksswb", X86Packss>, AVX512BIBase, VEX_W;
- defm VPACKUSWB : avx512_packs_all_i16_i8 <0x67, "vpackuswb", X86Packus>, AVX512BIBase, VEX_W;
+defm VPACKSSDW : avx512_packs_all_i32_i16<0x6B, "vpackssdw", X86Packss>, AVX512BIBase;
+defm VPACKUSDW : avx512_packs_all_i32_i16<0x2b, "vpackusdw", X86Packus>, AVX5128IBase;
+defm VPACKSSWB : avx512_packs_all_i16_i8 <0x63, "vpacksswb", X86Packss>, AVX512BIBase;
+defm VPACKUSWB : avx512_packs_all_i16_i8 <0x67, "vpackuswb", X86Packus>, AVX512BIBase;
- defm VPMADDUBSW : avx512_vpmadd<0x04, "vpmaddubsw", X86vpmaddubsw,
- avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8PD;
- defm VPMADDWD : avx512_vpmadd<0xF5, "vpmaddwd", X86vpmaddwd,
- avx512vl_i16_info, avx512vl_i32_info>, AVX512BIBase;
-}
+defm VPMADDUBSW : avx512_vpmadd<0x04, "vpmaddubsw", X86vpmaddubsw,
+ avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8PD;
+defm VPMADDWD : avx512_vpmadd<0xF5, "vpmaddwd", X86vpmaddwd,
+ avx512vl_i16_info, avx512vl_i32_info>, AVX512BIBase;
defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxsb", smax,
SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
@@ -3603,7 +3772,7 @@ multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
itins.rr, IsCommutable>;
defm rm_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(VecNode (_.VT _.RC:$src1),
(_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
@@ -3620,7 +3789,7 @@ multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(ins _.FRC:$src1, _.ScalarMemOp:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set _.FRC:$dst, (OpNode _.FRC:$src1,
- (_.ScalarLdFrag addr:$src2)))], itins.rr>;
+ (_.ScalarLdFrag addr:$src2)))], itins.rm>;
}
}
@@ -3677,8 +3846,41 @@ defm VADD : avx512_binop_s_round<0x58, "vadd", fadd, X86faddRnd, SSE_ALU_ITINS_S
defm VMUL : avx512_binop_s_round<0x59, "vmul", fmul, X86fmulRnd, SSE_ALU_ITINS_S, 1>;
defm VSUB : avx512_binop_s_round<0x5C, "vsub", fsub, X86fsubRnd, SSE_ALU_ITINS_S, 0>;
defm VDIV : avx512_binop_s_round<0x5E, "vdiv", fdiv, X86fdivRnd, SSE_ALU_ITINS_S, 0>;
-defm VMIN : avx512_binop_s_sae <0x5D, "vmin", X86fmin, X86fminRnd, SSE_ALU_ITINS_S, 1>;
-defm VMAX : avx512_binop_s_sae <0x5F, "vmax", X86fmax, X86fmaxRnd, SSE_ALU_ITINS_S, 1>;
+defm VMIN : avx512_binop_s_sae <0x5D, "vmin", X86fmin, X86fminRnd, SSE_ALU_ITINS_S, 0>;
+defm VMAX : avx512_binop_s_sae <0x5F, "vmax", X86fmax, X86fmaxRnd, SSE_ALU_ITINS_S, 0>;
+
+// MIN/MAX nodes are commutable under "unsafe-fp-math"; in that case we use
+// X86fminc and X86fmaxc instead of X86fmin and X86fmax.
+multiclass avx512_commutable_binop_s<bits<8> opc, string OpcodeStr,
+ X86VectorVTInfo _, SDNode OpNode, OpndItins itins> {
+  let isCodeGenOnly = 1, isCommutable = 1, Predicates = [HasAVX512] in {
+ def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
+ (ins _.FRC:$src1, _.FRC:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))],
+ itins.rr>;
+ def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
+ (ins _.FRC:$src1, _.ScalarMemOp:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set _.FRC:$dst, (OpNode _.FRC:$src1,
+ (_.ScalarLdFrag addr:$src2)))], itins.rm>;
+ }
+}
+defm VMINCSSZ : avx512_commutable_binop_s<0x5D, "vminss", f32x_info, X86fminc,
+ SSE_ALU_ITINS_S.s>, XS, EVEX_4V, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+
+defm VMINCSDZ : avx512_commutable_binop_s<0x5D, "vminsd", f64x_info, X86fminc,
+ SSE_ALU_ITINS_S.d>, XD, VEX_W, EVEX_4V, VEX_LIG,
+ EVEX_CD8<64, CD8VT1>;
+
+defm VMAXCSSZ : avx512_commutable_binop_s<0x5F, "vmaxss", f32x_info, X86fmaxc,
+ SSE_ALU_ITINS_S.s>, XS, EVEX_4V, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+
+defm VMAXCSDZ : avx512_commutable_binop_s<0x5F, "vmaxsd", f64x_info, X86fmaxc,
+ SSE_ALU_ITINS_S.d>, XD, VEX_W, EVEX_4V, VEX_LIG,
+ EVEX_CD8<64, CD8VT1>;
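// Under unsafe-fp-math the DAG is expected to carry X86fminc/X86fmaxc, which
// the isCodeGenOnly defs above select; the encodings are the same
// vminss/vmaxss bytes, only the matched SelectionDAG node differs.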
multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _, bit IsCommutable> {
@@ -3686,19 +3888,17 @@ multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, _.RC:$src2))>, EVEX_4V;
- let mayLoad = 1 in {
- defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (OpNode _.RC:$src1, (_.LdFrag addr:$src2))>, EVEX_4V;
- defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (OpNode _.RC:$src1, (_.VT (X86VBroadcast
- (_.ScalarLdFrag addr:$src2))))>,
- EVEX_4V, EVEX_B;
- }//let mayLoad = 1
+ defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode _.RC:$src1, (_.LdFrag addr:$src2))>, EVEX_4V;
+ defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (OpNode _.RC:$src1, (_.VT (X86VBroadcast
+ (_.ScalarLdFrag addr:$src2))))>,
+ EVEX_4V, EVEX_B;
}
multiclass avx512_fp_round_packed<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd,
@@ -3721,16 +3921,18 @@ multiclass avx512_fp_sae_packed<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd,
}
multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
- bit IsCommutable = 0> {
+ Predicate prd, bit IsCommutable = 0> {
+ let Predicates = [prd] in {
defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v16f32_info,
IsCommutable>, EVEX_V512, PS,
EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f64_info,
IsCommutable>, EVEX_V512, PD, VEX_W,
EVEX_CD8<64, CD8VF>;
+ }
// Define only if AVX512VL feature is present.
- let Predicates = [HasVLX] in {
+ let Predicates = [prd, HasVLX] in {
defm PSZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f32x_info,
IsCommutable>, EVEX_V128, PS,
EVEX_CD8<32, CD8VF>;
@@ -3760,24 +3962,26 @@ multiclass avx512_fp_binop_p_sae<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd
EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>;
}
-defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, 1>,
+defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, HasAVX512, 1>,
avx512_fp_binop_p_round<0x58, "vadd", X86faddRnd>;
-defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, 1>,
+defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, HasAVX512, 1>,
avx512_fp_binop_p_round<0x59, "vmul", X86fmulRnd>;
-defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub>,
+defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub, HasAVX512>,
avx512_fp_binop_p_round<0x5C, "vsub", X86fsubRnd>;
-defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv>,
+defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv, HasAVX512>,
avx512_fp_binop_p_round<0x5E, "vdiv", X86fdivRnd>;
-defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, 1>,
+defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, HasAVX512, 0>,
avx512_fp_binop_p_sae<0x5D, "vmin", X86fminRnd>;
-defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, 1>,
+defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, HasAVX512, 0>,
avx512_fp_binop_p_sae<0x5F, "vmax", X86fmaxRnd>;
-let Predicates = [HasDQI] in {
- defm VAND : avx512_fp_binop_p<0x54, "vand", X86fand, 1>;
- defm VANDN : avx512_fp_binop_p<0x55, "vandn", X86fandn, 0>;
- defm VOR : avx512_fp_binop_p<0x56, "vor", X86for, 1>;
- defm VXOR : avx512_fp_binop_p<0x57, "vxor", X86fxor, 1>;
+let isCodeGenOnly = 1 in {
+ defm VMINC : avx512_fp_binop_p<0x5D, "vmin", X86fminc, HasAVX512, 1>;
+ defm VMAXC : avx512_fp_binop_p<0x5F, "vmax", X86fmaxc, HasAVX512, 1>;
}
+defm VAND : avx512_fp_binop_p<0x54, "vand", X86fand, HasDQI, 1>;
+defm VANDN : avx512_fp_binop_p<0x55, "vandn", X86fandn, HasDQI, 0>;
+defm VOR : avx512_fp_binop_p<0x56, "vor", X86for, HasDQI, 1>;
+defm VXOR : avx512_fp_binop_p<0x57, "vxor", X86fxor, HasDQI, 1>;
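// With the predicate now a parameter of avx512_fp_binop_p, the DQI-only
// logical ops above no longer need an enclosing "let Predicates" block, and
// the codegen-only VMINC/VMAXC packed forms reuse the same multiclass.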
multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
@@ -3785,19 +3989,17 @@ multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, _.RC:$src2, (i32 FROUND_CURRENT)))>, EVEX_4V;
- let mayLoad = 1 in {
- defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (OpNode _.RC:$src1, (_.LdFrag addr:$src2), (i32 FROUND_CURRENT))>, EVEX_4V;
- defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (OpNode _.RC:$src1, (_.VT (X86VBroadcast
- (_.ScalarLdFrag addr:$src2))), (i32 FROUND_CURRENT))>,
- EVEX_4V, EVEX_B;
- }//let mayLoad = 1
+ defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode _.RC:$src1, (_.LdFrag addr:$src2), (i32 FROUND_CURRENT))>, EVEX_4V;
+ defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (OpNode _.RC:$src1, (_.VT (X86VBroadcast
+ (_.ScalarLdFrag addr:$src2))), (i32 FROUND_CURRENT))>,
+ EVEX_4V, EVEX_B;
}
multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -3806,26 +4008,26 @@ multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, _.RC:$src2, (i32 FROUND_CURRENT)))>;
- let mayLoad = 1 in {
- defm rm: AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (OpNode _.RC:$src1, (_.LdFrag addr:$src2), (i32 FROUND_CURRENT))>;
- }//let mayLoad = 1
+ defm rm: AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode _.RC:$src1,
+ (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
+ (i32 FROUND_CURRENT))>;
}
-multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr, SDNode OpNode> {
+multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr, SDNode OpNode, SDNode OpNodeScal> {
defm PSZ : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v16f32_info>,
avx512_fp_round_packed<opc, OpcodeStr, OpNode, v16f32_info>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v8f64_info>,
avx512_fp_round_packed<opc, OpcodeStr, OpNode, v8f64_info>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
- defm SSZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNode, f32x_info>,
- avx512_fp_scalar_round<opcScaler, OpcodeStr##"ss", f32x_info, OpNode, SSE_ALU_ITINS_S.s>,
+ defm SSZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNodeScal, f32x_info>,
+ avx512_fp_scalar_round<opcScaler, OpcodeStr##"ss", f32x_info, OpNodeScal, SSE_ALU_ITINS_S.s>,
EVEX_4V,EVEX_CD8<32, CD8VT1>;
- defm SDZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNode, f64x_info>,
- avx512_fp_scalar_round<opcScaler, OpcodeStr##"sd", f64x_info, OpNode, SSE_ALU_ITINS_S.d>,
+ defm SDZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNodeScal, f64x_info>,
+ avx512_fp_scalar_round<opcScaler, OpcodeStr##"sd", f64x_info, OpNodeScal, SSE_ALU_ITINS_S.d>,
EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
// Define only if AVX512VL feature is present.
@@ -3840,7 +4042,7 @@ multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr
EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
}
}
-defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef", X86scalef>, T8PD;
+defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef", X86scalef, X86scalefs>, T8PD;
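// Splitting out a scalar node (X86scalefs vs. X86scalef) lets the SS/SD forms
// match a dedicated scalar-typed node while the packed forms keep the vector
// one; a hypothetical selection would be:
//   vscalefss (%rax), %xmm1, %xmm0   # xmm0 = xmm1 * 2^floor(mem32)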
//===----------------------------------------------------------------------===//
// AVX-512 VPTESTM instructions
@@ -3848,12 +4050,12 @@ defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef", X86scalef>, T8PD;
multiclass avx512_vptest<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
+ let isCommutable = 1 in
defm rr : AVX512_maskable_cmp<opc, MRMSrcReg, _, (outs _.KRC:$dst),
(ins _.RC:$src1, _.RC:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>,
EVEX_4V;
- let mayLoad = 1 in
defm rm : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -3865,7 +4067,6 @@ multiclass avx512_vptest<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_vptest_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let mayLoad = 1 in
defm rmb : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"${src2}"##_.BroadcastStr##", $src1",
@@ -3874,8 +4075,22 @@ multiclass avx512_vptest_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.ScalarLdFrag addr:$src2))))>,
EVEX_B, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
}
+
+// Use the 512-bit version to implement 128/256-bit operations under NoVLX.
+multiclass avx512_vptest_lowering<SDNode OpNode, X86VectorVTInfo ExtendInfo,
+ X86VectorVTInfo _, string Suffix> {
+ def : Pat<(_.KVT (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))),
+ (_.KVT (COPY_TO_REGCLASS
+ (!cast<Instruction>(NAME # Suffix # "Zrr")
+ (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
+ _.RC:$src1, _.SubRegIdx),
+ (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
+ _.RC:$src2, _.SubRegIdx)),
+ _.KRC))>;
+}
+
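// Sketch of the widening above: a 128/256-bit vptestm without VLX is
// legalized by inserting both operands into undef 512-bit registers via
// INSERT_SUBREG, executing the Z-form instruction, and copying the result to
// the narrow mask class; the extra lanes come from IMPLICIT_DEF, and their
// mask bits are ignored by the narrow mask users.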
multiclass avx512_vptest_dq_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
- AVX512VLVectorVTInfo _> {
+ AVX512VLVectorVTInfo _, string Suffix> {
let Predicates = [HasAVX512] in
defm Z : avx512_vptest<opc, OpcodeStr, OpNode, _.info512>,
avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
@@ -3886,13 +4101,17 @@ multiclass avx512_vptest_dq_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
defm Z128 : avx512_vptest<opc, OpcodeStr, OpNode, _.info128>,
avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
}
+ let Predicates = [HasAVX512, NoVLX] in {
+ defm Z256_Alt : avx512_vptest_lowering< OpNode, _.info512, _.info256, Suffix>;
+ defm Z128_Alt : avx512_vptest_lowering< OpNode, _.info512, _.info128, Suffix>;
+ }
}
multiclass avx512_vptest_dq<bits<8> opc, string OpcodeStr, SDNode OpNode> {
defm D : avx512_vptest_dq_sizes<opc, OpcodeStr#"d", OpNode,
- avx512vl_i32_info>;
+ avx512vl_i32_info, "D">;
defm Q : avx512_vptest_dq_sizes<opc, OpcodeStr#"q", OpNode,
- avx512vl_i64_info>, VEX_W;
+ avx512vl_i64_info, "Q">, VEX_W;
}
multiclass avx512_vptest_wb<bits<8> opc, string OpcodeStr,
@@ -3914,6 +4133,14 @@ multiclass avx512_vptest_wb<bits<8> opc, string OpcodeStr,
defm BZ128: avx512_vptest<opc, OpcodeStr#"b", OpNode, v16i8x_info>,
EVEX_V128;
}
+
+ let Predicates = [HasAVX512, NoVLX] in {
+ defm BZ256_Alt : avx512_vptest_lowering< OpNode, v64i8_info, v32i8x_info, "B">;
+ defm BZ128_Alt : avx512_vptest_lowering< OpNode, v64i8_info, v16i8x_info, "B">;
+ defm WZ256_Alt : avx512_vptest_lowering< OpNode, v32i16_info, v16i16x_info, "W">;
+ defm WZ128_Alt : avx512_vptest_lowering< OpNode, v32i16_info, v8i16x_info, "W">;
+ }
+
}
multiclass avx512_vptest_all_forms<bits<8> opc_wb, bits<8> opc_dq, string OpcodeStr,
@@ -3924,13 +4151,6 @@ multiclass avx512_vptest_all_forms<bits<8> opc_wb, bits<8> opc_dq, string Opcode
defm VPTESTM : avx512_vptest_all_forms<0x26, 0x27, "vptestm", X86testm>, T8PD;
defm VPTESTNM : avx512_vptest_all_forms<0x26, 0x27, "vptestnm", X86testnm>, T8XS;
-def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
- (v16i32 VR512:$src2), (i16 -1))),
- (COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;
-
-def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
- (v8i64 VR512:$src2), (i8 -1))),
- (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
//===----------------------------------------------------------------------===//
// AVX-512 Shift instructions
@@ -3942,7 +4162,6 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, (i8 imm:$src2))),
SSE_INTSHIFT_ITINS_P.rr>;
- let mayLoad = 1 in
defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
(ins _.MemOp:$src1, u8imm:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -3953,7 +4172,6 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
multiclass avx512_shift_rmbi<bits<8> opc, Format ImmFormM,
string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
- let mayLoad = 1 in
defm mbi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
(ins _.ScalarMemOp:$src1, u8imm:$src2), OpcodeStr,
"$src2, ${src1}"##_.BroadcastStr, "${src1}"##_.BroadcastStr##", $src2",
@@ -4073,7 +4291,6 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, (_.VT _.RC:$src2))),
SSE_INTSHIFT_ITINS_P.rr>, AVX5128IBase, EVEX_4V;
- let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -4085,7 +4302,6 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_var_shift_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let mayLoad = 1 in
defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"${src2}"##_.BroadcastStr##", $src1",
@@ -4117,20 +4333,20 @@ multiclass avx512_var_shift_types<bits<8> opc, string OpcodeStr,
avx512vl_i64_info>, VEX_W;
}
-// Use 512bit version to implement 128/256 bit in case NoVLX.
+// Use the 512-bit version to implement 128/256-bit operations under NoVLX.
multiclass avx512_var_shift_w_lowering<AVX512VLVectorVTInfo _, SDNode OpNode> {
let Predicates = [HasBWI, NoVLX] in {
- def : Pat<(_.info256.VT (OpNode (_.info256.VT _.info256.RC:$src1),
+ def : Pat<(_.info256.VT (OpNode (_.info256.VT _.info256.RC:$src1),
(_.info256.VT _.info256.RC:$src2))),
- (EXTRACT_SUBREG
+ (EXTRACT_SUBREG
(!cast<Instruction>(NAME#"WZrr")
(INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src1, sub_ymm),
(INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src2, sub_ymm)),
sub_ymm)>;
- def : Pat<(_.info128.VT (OpNode (_.info128.VT _.info128.RC:$src1),
+ def : Pat<(_.info128.VT (OpNode (_.info128.VT _.info128.RC:$src1),
(_.info128.VT _.info128.RC:$src2))),
- (EXTRACT_SUBREG
+ (EXTRACT_SUBREG
(!cast<Instruction>(NAME#"WZrr")
(INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src1, sub_xmm),
(INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src2, sub_xmm)),
@@ -4155,9 +4371,14 @@ multiclass avx512_var_shift_w<bits<8> opc, string OpcodeStr,
defm VPSLLV : avx512_var_shift_types<0x47, "vpsllv", shl>,
avx512_var_shift_w<0x12, "vpsllvw", shl>,
avx512_var_shift_w_lowering<avx512vl_i16_info, shl>;
+
defm VPSRAV : avx512_var_shift_types<0x46, "vpsrav", sra>,
avx512_var_shift_w<0x11, "vpsravw", sra>,
avx512_var_shift_w_lowering<avx512vl_i16_info, sra>;
+let isCodeGenOnly = 1 in
+ defm VPSRAV_Int : avx512_var_shift_types<0x46, "vpsrav", X86vsrav>,
+ avx512_var_shift_w<0x11, "vpsravw", X86vsrav>;
+
defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", srl>,
avx512_var_shift_w<0x10, "vpsrlvw", srl>,
avx512_var_shift_w_lowering<avx512vl_i16_info, srl>;
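// The isCodeGenOnly VPSRAV_Int clones match the X86vsrav node so the vpsrav*
// intrinsics can still be selected, without colliding with the generic "sra"
// patterns that share the same encodings.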
@@ -4193,8 +4414,24 @@ multiclass avx512_vpermi_dq_sizes<bits<8> opc, Format ImmFormR, Format ImmFormM,
VTInfo.info256>, EVEX_V256;
}
+multiclass avx512_vperm_bw<bits<8> opc, string OpcodeStr,
+ Predicate prd, SDNode OpNode,
+ AVX512VLVectorVTInfo _> {
+ let Predicates = [prd] in
+ defm Z: avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
+              EVEX_V512;
+ let Predicates = [HasVLX, prd] in {
+ defm Z256: avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
+              EVEX_V256;
+ defm Z128: avx512_var_shift<opc, OpcodeStr, OpNode, _.info128>,
+              EVEX_V128;
+ }
+}
-defm VPERM : avx512_var_shift_w<0x8D, "vpermw", X86VPermv>;
+defm VPERMW : avx512_vperm_bw<0x8D, "vpermw", HasBWI, X86VPermv,
+ avx512vl_i16_info>, VEX_W;
+defm VPERMB : avx512_vperm_bw<0x8D, "vpermb", HasVBMI, X86VPermv,
+ avx512vl_i8_info>;
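// VPERMW and VPERMB share opcode 0x8D; the W bit tells them apart, so the
// word form (HasBWI) carries VEX_W while the byte form (HasVBMI) does not.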
defm VPERMD : avx512_vperm_dq_sizes<0x36, "vpermd", X86VPermv,
avx512vl_i32_info>;
@@ -4212,7 +4449,7 @@ defm VPERMPD : avx512_vpermi_dq_sizes<0x01, MRMSrcReg, MRMSrcMem, "vpermpd",
X86VPermi, avx512vl_f64_info>,
EVEX, AVX512AIi8Base, EVEX_CD8<64, CD8VF>, VEX_W;
//===----------------------------------------------------------------------===//
-// AVX-512 - VPERMIL
+// AVX-512 - VPERMIL
//===----------------------------------------------------------------------===//
multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
@@ -4223,24 +4460,22 @@ multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode _.RC:$src1,
(Ctrl.VT Ctrl.RC:$src2)))>,
T8PD, EVEX_4V;
- let mayLoad = 1 in {
- defm rm: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, Ctrl.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (_.VT (OpNode
- _.RC:$src1,
- (Ctrl.VT (bitconvert(Ctrl.LdFrag addr:$src2)))))>,
- T8PD, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
- defm rmb: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (_.VT (OpNode
- _.RC:$src1,
- (Ctrl.VT (X86VBroadcast
- (Ctrl.ScalarLdFrag addr:$src2)))))>,
- T8PD, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
- }//let mayLoad = 1
+ defm rm: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, Ctrl.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_.VT (OpNode
+ _.RC:$src1,
+ (Ctrl.VT (bitconvert(Ctrl.LdFrag addr:$src2)))))>,
+ T8PD, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
+ defm rmb: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (_.VT (OpNode
+ _.RC:$src1,
+ (Ctrl.VT (X86VBroadcast
+ (Ctrl.ScalarLdFrag addr:$src2)))))>,
+ T8PD, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
}
multiclass avx512_permil_vec_common<string OpcodeStr, bits<8> OpcVar,
@@ -4326,16 +4561,15 @@ let Predicates = [HasAVX512] in {
//===----------------------------------------------------------------------===//
multiclass avx512_mov_hilo_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let mayLoad = 1 in
- def rm : AVX512<opc, MRMSrcMem, (outs _.RC:$dst),
- (ins _.RC:$src1, f64mem:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set _.RC:$dst,
- (OpNode _.RC:$src1,
- (_.VT (bitconvert
- (v2f64 (scalar_to_vector (loadf64 addr:$src2)))))))],
- IIC_SSE_MOV_LH>, EVEX_4V;
+ def rm : AVX512<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.RC:$src1, f64mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set _.RC:$dst,
+ (OpNode _.RC:$src1,
+ (_.VT (bitconvert
+ (v2f64 (scalar_to_vector (loadf64 addr:$src2)))))))],
+ IIC_SSE_MOV_LH>, EVEX_4V;
}
defm VMOVHPSZ128 : avx512_mov_hilo_packed<0x16, "vmovhps", X86Movlhps,
@@ -4377,11 +4611,10 @@ let Predicates = [HasAVX512] in {
(VMOVLPDZ128rm VR128X:$src1, addr:$src2)>;
}
-let mayStore = 1 in {
def VMOVHPSZ128mr : AVX512PSI<0x17, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovhps\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract
+ [(store (f64 (extractelt
(X86Unpckh (bc_v2f64 (v4f32 VR128X:$src)),
(bc_v2f64 (v4f32 VR128X:$src))),
(iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
@@ -4389,28 +4622,28 @@ def VMOVHPSZ128mr : AVX512PSI<0x17, MRMDestMem, (outs),
def VMOVHPDZ128mr : AVX512PDI<0x17, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovhpd\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract
+ [(store (f64 (extractelt
(v2f64 (X86Unpckh VR128X:$src, VR128X:$src)),
(iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
def VMOVLPSZ128mr : AVX512PSI<0x13, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovlps\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128X:$src)),
+ [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128X:$src)),
(iPTR 0))), addr:$dst)],
IIC_SSE_MOV_LH>,
EVEX, EVEX_CD8<32, CD8VT2>;
def VMOVLPDZ128mr : AVX512PDI<0x13, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovlpd\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract (v2f64 VR128X:$src),
+ [(store (f64 (extractelt (v2f64 VR128X:$src),
(iPTR 0))), addr:$dst)],
IIC_SSE_MOV_LH>,
EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
-}
+
let Predicates = [HasAVX512] in {
// VMOVHPD patterns
- def : Pat<(store (f64 (vector_extract
+ def : Pat<(store (f64 (extractelt
(v2f64 (X86VPermilpi VR128X:$src, (i8 1))),
(iPTR 0))), addr:$dst),
(VMOVHPDZ128mr addr:$dst, VR128X:$src)>;
@@ -4442,21 +4675,19 @@ multiclass avx512_fma3p_213_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
AVX512FMA3Base;
- let mayLoad = 1 in {
- defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.MemOp:$src3),
- OpcodeStr, "$src3, $src2", "$src2, $src3",
- (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
- AVX512FMA3Base;
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
+ AVX512FMA3Base;
- defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.ScalarMemOp:$src3),
- OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
- !strconcat("$src2, ${src3}", _.BroadcastStr ),
- (OpNode _.RC:$src1,
- _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
- AVX512FMA3Base, EVEX_B;
- }
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3),
+ OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
+ !strconcat("$src2, ${src3}", _.BroadcastStr ),
+ (OpNode _.RC:$src1,
+ _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
+ AVX512FMA3Base, EVEX_B;
}
multiclass avx512_fma3_213_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -4509,21 +4740,19 @@ multiclass avx512_fma3p_231_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1))>,
AVX512FMA3Base;
- let mayLoad = 1 in {
- defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.MemOp:$src3),
- OpcodeStr, "$src3, $src2", "$src2, $src3",
- (_.VT (OpNode _.RC:$src2, (_.LdFrag addr:$src3), _.RC:$src1))>,
- AVX512FMA3Base;
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src2, (_.LdFrag addr:$src3), _.RC:$src1))>,
+ AVX512FMA3Base;
- defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.ScalarMemOp:$src3),
- OpcodeStr, "${src3}"##_.BroadcastStr##", $src2",
- "$src2, ${src3}"##_.BroadcastStr,
- (_.VT (OpNode _.RC:$src2,
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
- _.RC:$src1))>, AVX512FMA3Base, EVEX_B;
- }
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3),
+ OpcodeStr, "${src3}"##_.BroadcastStr##", $src2",
+ "$src2, ${src3}"##_.BroadcastStr,
+ (_.VT (OpNode _.RC:$src2,
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
+ _.RC:$src1))>, AVX512FMA3Base, EVEX_B;
}
multiclass avx512_fma3_231_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -4575,21 +4804,19 @@ multiclass avx512_fma3p_132_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
AVX512FMA3Base;
- let mayLoad = 1 in {
- defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src3, _.MemOp:$src2),
- OpcodeStr, "$src2, $src3", "$src3, $src2",
- (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2), _.RC:$src3))>,
- AVX512FMA3Base;
-
- defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src3, _.ScalarMemOp:$src2),
- OpcodeStr, "${src2}"##_.BroadcastStr##", $src3",
- "$src3, ${src2}"##_.BroadcastStr,
- (_.VT (OpNode _.RC:$src1,
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
- _.RC:$src3))>, AVX512FMA3Base, EVEX_B;
- }
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src3, _.MemOp:$src2),
+ OpcodeStr, "$src2, $src3", "$src3, $src2",
+ (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2), _.RC:$src3))>,
+ AVX512FMA3Base;
+
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src3, _.ScalarMemOp:$src2),
+ OpcodeStr, "${src2}"##_.BroadcastStr##", $src3",
+ "$src3, ${src2}"##_.BroadcastStr,
+ (_.VT (OpNode _.RC:$src1,
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
+ _.RC:$src3))>, AVX512FMA3Base, EVEX_B;
}
multiclass avx512_fma3_132_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -4641,10 +4868,9 @@ multiclass avx512_fma3s_common<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(ins _.RC:$src2, _.RC:$src3), OpcodeStr,
"$src3, $src2", "$src2, $src3", RHS_VEC_r>, AVX512FMA3Base;
- let mayLoad = 1 in
- defm m_Int: AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.MemOp:$src3), OpcodeStr,
- "$src3, $src2", "$src2, $src3", RHS_VEC_m>, AVX512FMA3Base;
+ defm m_Int: AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3), OpcodeStr,
+ "$src3, $src2", "$src2, $src3", RHS_VEC_m>, AVX512FMA3Base;
defm rb_Int: AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
@@ -4657,12 +4883,11 @@ multiclass avx512_fma3s_common<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[RHS_r]>;
- let mayLoad = 1 in
- def m : AVX512FMA3<opc, MRMSrcMem, (outs _.FRC:$dst),
- (ins _.FRC:$src1, _.FRC:$src2, _.ScalarMemOp:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [RHS_m]>;
+ def m : AVX512FMA3<opc, MRMSrcMem, (outs _.FRC:$dst),
+ (ins _.FRC:$src1, _.FRC:$src2, _.ScalarMemOp:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [RHS_m]>;
}// isCodeGenOnly = 1
}
}// Constraints = "$src1 = $dst"
@@ -4672,9 +4897,9 @@ multiclass avx512_fma3s_all<bits<8> opc213, bits<8> opc231, bits<8> opc132,
string SUFF> {
defm NAME#213#SUFF: avx512_fma3s_common<opc213, OpcodeStr#"213"#_.Suffix , _ ,
- (_.VT (OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3)),
- (_.VT (OpNode _.RC:$src2, _.RC:$src1,
- (_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))))),
+ (_.VT (OpNodeRnd _.RC:$src2, _.RC:$src1, _.RC:$src3, (i32 FROUND_CURRENT))),
+ (_.VT (OpNodeRnd _.RC:$src2, _.RC:$src1,
+ (_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))), (i32 FROUND_CURRENT))),
(_.VT ( OpNodeRnd _.RC:$src2, _.RC:$src1, _.RC:$src3,
(i32 imm:$rc))),
(set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src1,
@@ -4683,10 +4908,10 @@ multiclass avx512_fma3s_all<bits<8> opc213, bits<8> opc231, bits<8> opc132,
(_.ScalarLdFrag addr:$src3))))>;
defm NAME#231#SUFF: avx512_fma3s_common<opc231, OpcodeStr#"231"#_.Suffix , _ ,
- (_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1)),
- (_.VT (OpNode _.RC:$src2,
+ (_.VT (OpNodeRnd _.RC:$src2, _.RC:$src3, _.RC:$src1, (i32 FROUND_CURRENT))),
+ (_.VT (OpNodeRnd _.RC:$src2,
(_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))),
- _.RC:$src1)),
+ _.RC:$src1, (i32 FROUND_CURRENT))),
(_.VT ( OpNodeRnd _.RC:$src2, _.RC:$src3, _.RC:$src1,
(i32 imm:$rc))),
(set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src3,
@@ -4695,10 +4920,10 @@ multiclass avx512_fma3s_all<bits<8> opc213, bits<8> opc231, bits<8> opc132,
(_.ScalarLdFrag addr:$src3), _.FRC:$src1)))>;
defm NAME#132#SUFF: avx512_fma3s_common<opc132, OpcodeStr#"132"#_.Suffix , _ ,
- (_.VT (OpNode _.RC:$src1, _.RC:$src3, _.RC:$src2)),
- (_.VT (OpNode _.RC:$src1,
+ (_.VT (OpNodeRnd _.RC:$src1, _.RC:$src3, _.RC:$src2, (i32 FROUND_CURRENT))),
+ (_.VT (OpNodeRnd _.RC:$src1,
(_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))),
- _.RC:$src2)),
+ _.RC:$src2, (i32 FROUND_CURRENT))),
(_.VT ( OpNodeRnd _.RC:$src1, _.RC:$src3, _.RC:$src2,
(i32 imm:$rc))),
(set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src1, _.FRC:$src3,
@@ -4725,6 +4950,53 @@ defm VFNMADD : avx512_fma3s<0xAD, 0xBD, 0x9D, "vfnmadd", X86Fnmadd, X86FnmaddRnd
defm VFNMSUB : avx512_fma3s<0xAF, 0xBF, 0x9F, "vfnmsub", X86Fnmsub, X86FnmsubRnd>;
//===----------------------------------------------------------------------===//
+// AVX-512 IFMA: Packed Multiply of Unsigned 52-bit Integers and Add the Low 52 Bits
+//===----------------------------------------------------------------------===//
+let Constraints = "$src1 = $dst" in {
+multiclass avx512_pmadd52_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _> {
+ defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
+ AVX512FMA3Base;
+
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
+ AVX512FMA3Base;
+
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3),
+ OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
+ !strconcat("$src2, ${src3}", _.BroadcastStr ),
+ (OpNode _.RC:$src1,
+ _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
+ AVX512FMA3Base, EVEX_B;
+}
+} // Constraints = "$src1 = $dst"
+
+multiclass avx512_pmadd52_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ AVX512VLVectorVTInfo _> {
+ let Predicates = [HasIFMA] in {
+ defm Z : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info512>,
+ EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
+ }
+ let Predicates = [HasVLX, HasIFMA] in {
+ defm Z256 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info256>,
+ EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
+ defm Z128 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info128>,
+ EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
+ }
+}
+
+defm VPMADD52LUQ : avx512_pmadd52_common<0xb4, "vpmadd52luq", x86vpmadd52l,
+ avx512vl_i64_info>, VEX_W;
+defm VPMADD52HUQ : avx512_pmadd52_common<0xb5, "vpmadd52huq", x86vpmadd52h,
+ avx512vl_i64_info>, VEX_W;
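// IFMA sketch: per 64-bit lane, vpmadd52luq multiplies the low 52 bits of
// src2 and src3 into a 104-bit product and adds the product's low 52 bits
// into the accumulator (vpmadd52huq adds the high 52 bits); the
// "$src1 = $dst" constraint above models that accumulator, reusing the FMA3
// three-source masking scaffolding.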
+
+//===----------------------------------------------------------------------===//
// AVX-512 Scalar convert from sign integer to float/double
//===----------------------------------------------------------------------===//
@@ -4848,54 +5120,65 @@ def : Pat<(f64 (uint_to_fp GR64:$src)),
//===----------------------------------------------------------------------===//
// AVX-512 Scalar convert from float/double to integer
//===----------------------------------------------------------------------===//
-multiclass avx512_cvt_s_int_round<bits<8> opc, RegisterClass SrcRC,
- RegisterClass DstRC, Intrinsic Int,
- Operand memop, ComplexPattern mem_cpat, string asm> {
- let hasSideEffects = 0, Predicates = [HasAVX512] in {
- def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+multiclass avx512_cvt_s_int_round<bits<8> opc, X86VectorVTInfo SrcVT ,
+ X86VectorVTInfo DstVT, SDNode OpNode, string asm> {
+ let Predicates = [HasAVX512] in {
+ def rr : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
- [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG;
- def rb : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
- !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"), []>,
+ [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 FROUND_CURRENT)))]>,
+ EVEX, VEX_LIG;
+ def rb : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src, AVX512RC:$rc),
+ !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
+ [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 imm:$rc)))]>,
EVEX, VEX_LIG, EVEX_B, EVEX_RC;
- let mayLoad = 1 in
- def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
- !strconcat(asm,"\t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG;
- } // hasSideEffects = 0, Predicates = [HasAVX512]
+ def rm : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.ScalarMemOp:$src),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ [(set DstVT.RC:$dst, (OpNode
+ (SrcVT.VT (scalar_to_vector (SrcVT.ScalarLdFrag addr:$src))),
+ (i32 FROUND_CURRENT)))]>,
+ EVEX, VEX_LIG;
+ } // Predicates = [HasAVX512]
}
// Convert float/double to signed/unsigned int 32/64
-defm VCVTSS2SIZ: avx512_cvt_s_int_round<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
- ssmem, sse_load_f32, "cvtss2si">,
+defm VCVTSS2SIZ: avx512_cvt_s_int_round<0x2D, f32x_info, i32x_info,
+ X86cvts2si, "cvtss2si">,
XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTSS2SI64Z: avx512_cvt_s_int_round<0x2D, VR128X, GR64,
- int_x86_sse_cvtss2si64,
- ssmem, sse_load_f32, "cvtss2si">,
+defm VCVTSS2SI64Z: avx512_cvt_s_int_round<0x2D, f32x_info, i64x_info,
+ X86cvts2si, "cvtss2si">,
XS, VEX_W, EVEX_CD8<32, CD8VT1>;
-defm VCVTSS2USIZ: avx512_cvt_s_int_round<0x79, VR128X, GR32,
- int_x86_avx512_cvtss2usi,
- ssmem, sse_load_f32, "cvtss2usi">,
+defm VCVTSS2USIZ: avx512_cvt_s_int_round<0x79, f32x_info, i32x_info,
+ X86cvts2usi, "cvtss2usi">,
XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTSS2USI64Z: avx512_cvt_s_int_round<0x79, VR128X, GR64,
- int_x86_avx512_cvtss2usi64, ssmem,
- sse_load_f32, "cvtss2usi">, XS, VEX_W,
+defm VCVTSS2USI64Z: avx512_cvt_s_int_round<0x79, f32x_info, i64x_info,
+ X86cvts2usi, "cvtss2usi">, XS, VEX_W,
EVEX_CD8<32, CD8VT1>;
-defm VCVTSD2SIZ: avx512_cvt_s_int_round<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
- sdmem, sse_load_f64, "cvtsd2si">,
+defm VCVTSD2SIZ: avx512_cvt_s_int_round<0x2D, f64x_info, i32x_info,
+ X86cvts2si, "cvtsd2si">,
XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTSD2SI64Z: avx512_cvt_s_int_round<0x2D, VR128X, GR64,
- int_x86_sse2_cvtsd2si64,
- sdmem, sse_load_f64, "cvtsd2si">,
+defm VCVTSD2SI64Z: avx512_cvt_s_int_round<0x2D, f64x_info, i64x_info,
+ X86cvts2si, "cvtsd2si">,
XD, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VCVTSD2USIZ: avx512_cvt_s_int_round<0x79, VR128X, GR32,
- int_x86_avx512_cvtsd2usi,
- sdmem, sse_load_f64, "cvtsd2usi">,
+defm VCVTSD2USIZ: avx512_cvt_s_int_round<0x79, f64x_info, i32x_info,
+ X86cvts2usi, "cvtsd2usi">,
XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTSD2USI64Z: avx512_cvt_s_int_round<0x79, VR128X, GR64,
- int_x86_avx512_cvtsd2usi64, sdmem,
- sse_load_f64, "cvtsd2usi">, XD, VEX_W,
+defm VCVTSD2USI64Z: avx512_cvt_s_int_round<0x79, f64x_info, i64x_info,
+ X86cvts2usi, "cvtsd2usi">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;
+// The SSE versions of these instructions are disabled for AVX512.
+// Therefore, the SSE intrinsics are mapped to the AVX512 instructions.
+let Predicates = [HasAVX512] in {
+ def : Pat<(i32 (int_x86_sse_cvtss2si (v4f32 VR128X:$src))),
+ (VCVTSS2SIZrr (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
+ def : Pat<(i64 (int_x86_sse_cvtss2si64 (v4f32 VR128X:$src))),
+ (VCVTSS2SI64Zrr (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
+ def : Pat<(i32 (int_x86_sse2_cvtsd2si (v2f64 VR128X:$src))),
+ (VCVTSD2SIZrr (COPY_TO_REGCLASS VR128X:$src, FR64X))>;
+ def : Pat<(i64 (int_x86_sse2_cvtsd2si64 (v2f64 VR128X:$src))),
+ (VCVTSD2SI64Zrr (COPY_TO_REGCLASS VR128X:$src, FR64X))>;
+} // HasAVX512
+
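// A hypothetical C-level view of the mapping above: _mm_cvtss_si32(v)
// lowers through int_x86_sse_cvtss2si and now selects VCVTSS2SIZrr whenever
// AVX-512 is available, since the legacy SSE encoding is not used in that
// configuration.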
let isCodeGenOnly = 1 , Predicates = [HasAVX512] in {
defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
@@ -4910,14 +5193,14 @@ let isCodeGenOnly = 1 , Predicates = [HasAVX512] in {
int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
- defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x7B, GR32, VR128X,
int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V;
} // isCodeGenOnly = 1, Predicates = [HasAVX512]
// Convert float/double to signed/unsigned int 32/64 with truncation
-multiclass avx512_cvt_s_all<bits<8> opc, string asm, X86VectorVTInfo _SrcRC,
- X86VectorVTInfo _DstRC, SDNode OpNode,
+multiclass avx512_cvt_s_all<bits<8> opc, string asm, X86VectorVTInfo _SrcRC,
+ X86VectorVTInfo _DstRC, SDNode OpNode,
SDNode OpNodeRnd>{
let Predicates = [HasAVX512] in {
def rr : SI<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src),
@@ -4926,56 +5209,56 @@ let Predicates = [HasAVX512] in {
def rb : SI<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src),
!strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"),
[]>, EVEX, EVEX_B;
- def rm : SI<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.MemOp:$src),
+ def rm : SI<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.ScalarMemOp:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
- [(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src)))]>,
+ [(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src)))]>,
EVEX;
- let isCodeGenOnly = 1,hasSideEffects = 0 in {
+ let isCodeGenOnly = 1 in {
def rr_Int : SI<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
- [(set _DstRC.RC:$dst, (OpNodeRnd _SrcRC.RC:$src,
+ [(set _DstRC.RC:$dst, (OpNodeRnd (_SrcRC.VT _SrcRC.RC:$src),
(i32 FROUND_CURRENT)))]>, EVEX, VEX_LIG;
def rb_Int : SI<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src),
!strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"),
- [(set _DstRC.RC:$dst, (OpNodeRnd _SrcRC.RC:$src,
- (i32 FROUND_NO_EXC)))]>,
+ [(set _DstRC.RC:$dst, (OpNodeRnd (_SrcRC.VT _SrcRC.RC:$src),
+ (i32 FROUND_NO_EXC)))]>,
EVEX,VEX_LIG , EVEX_B;
- let mayLoad = 1 in
- def rm_Int : SI<opc, MRMSrcMem, (outs _DstRC.RC:$dst),
+ let mayLoad = 1, hasSideEffects = 0 in
+ def rm_Int : SI<opc, MRMSrcMem, (outs _DstRC.RC:$dst),
(ins _SrcRC.MemOp:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[]>, EVEX, VEX_LIG;
- } // isCodeGenOnly = 1, hasSideEffects = 0
+ } // isCodeGenOnly = 1
} //HasAVX512
}
-defm VCVTTSS2SIZ: avx512_cvt_s_all<0x2C, "cvttss2si", f32x_info, i32x_info,
- fp_to_sint,X86cvttss2IntRnd>,
+defm VCVTTSS2SIZ: avx512_cvt_s_all<0x2C, "cvttss2si", f32x_info, i32x_info,
+ fp_to_sint,X86cvtts2IntRnd>,
XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTTSS2SI64Z: avx512_cvt_s_all<0x2C, "cvttss2si", f32x_info, i64x_info,
- fp_to_sint,X86cvttss2IntRnd>,
+defm VCVTTSS2SI64Z: avx512_cvt_s_all<0x2C, "cvttss2si", f32x_info, i64x_info,
+ fp_to_sint,X86cvtts2IntRnd>,
VEX_W, XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTTSD2SIZ: avx512_cvt_s_all<0x2C, "cvttsd2si", f64x_info, i32x_info,
- fp_to_sint,X86cvttsd2IntRnd>,
+defm VCVTTSD2SIZ: avx512_cvt_s_all<0x2C, "cvttsd2si", f64x_info, i32x_info,
+ fp_to_sint,X86cvtts2IntRnd>,
XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTTSD2SI64Z: avx512_cvt_s_all<0x2C, "cvttsd2si", f64x_info, i64x_info,
- fp_to_sint,X86cvttsd2IntRnd>,
+defm VCVTTSD2SI64Z: avx512_cvt_s_all<0x2C, "cvttsd2si", f64x_info, i64x_info,
+ fp_to_sint,X86cvtts2IntRnd>,
VEX_W, XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTTSS2USIZ: avx512_cvt_s_all<0x78, "cvttss2usi", f32x_info, i32x_info,
- fp_to_uint,X86cvttss2UIntRnd>,
+defm VCVTTSS2USIZ: avx512_cvt_s_all<0x78, "cvttss2usi", f32x_info, i32x_info,
+ fp_to_uint,X86cvtts2UIntRnd>,
XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTTSS2USI64Z: avx512_cvt_s_all<0x78, "cvttss2usi", f32x_info, i64x_info,
- fp_to_uint,X86cvttss2UIntRnd>,
+defm VCVTTSS2USI64Z: avx512_cvt_s_all<0x78, "cvttss2usi", f32x_info, i64x_info,
+ fp_to_uint,X86cvtts2UIntRnd>,
XS,VEX_W, EVEX_CD8<32, CD8VT1>;
-defm VCVTTSD2USIZ: avx512_cvt_s_all<0x78, "cvttsd2usi", f64x_info, i32x_info,
- fp_to_uint,X86cvttsd2UIntRnd>,
+defm VCVTTSD2USIZ: avx512_cvt_s_all<0x78, "cvttsd2usi", f64x_info, i32x_info,
+ fp_to_uint,X86cvtts2UIntRnd>,
XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTTSD2USI64Z: avx512_cvt_s_all<0x78, "cvttsd2usi", f64x_info, i64x_info,
- fp_to_uint,X86cvttsd2UIntRnd>,
+defm VCVTTSD2USI64Z: avx512_cvt_s_all<0x78, "cvttsd2usi", f64x_info, i64x_info,
+ fp_to_uint,X86cvtts2UIntRnd>,
XD, VEX_W, EVEX_CD8<64, CD8VT1>;
let Predicates = [HasAVX512] in {
def : Pat<(i32 (int_x86_sse_cvttss2si (v4f32 VR128X:$src))),
@@ -4994,17 +5277,17 @@ let Predicates = [HasAVX512] in {
multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
X86VectorVTInfo _Src, SDNode OpNode> {
defm rr : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
- (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
+ (ins _.RC:$src1, _Src.RC:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
- (_.VT (OpNode (_Src.VT _Src.RC:$src1),
- (_Src.VT _Src.RC:$src2)))>,
+ (_.VT (OpNode (_.VT _.RC:$src1),
+ (_Src.VT _Src.RC:$src2)))>,
EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
+ (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
- (_.VT (OpNode (_Src.VT _Src.RC:$src1),
- (_Src.VT (scalar_to_vector
- (_Src.ScalarLdFrag addr:$src2)))))>,
+ (_.VT (OpNode (_.VT _.RC:$src1),
+ (_Src.VT (scalar_to_vector
+ (_Src.ScalarLdFrag addr:$src2)))))>,
EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
@@ -5012,9 +5295,9 @@ multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _
multiclass avx512_cvt_fp_sae_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
X86VectorVTInfo _Src, SDNode OpNodeRnd> {
defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
- (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
+ (ins _.RC:$src1, _Src.RC:$src2), OpcodeStr,
"{sae}, $src2, $src1", "$src1, $src2, {sae}",
- (_.VT (OpNodeRnd (_Src.VT _Src.RC:$src1),
+ (_.VT (OpNodeRnd (_.VT _.RC:$src1),
(_Src.VT _Src.RC:$src2),
(i32 FROUND_NO_EXC)))>,
EVEX_4V, VEX_LIG, EVEX_B;
@@ -5024,15 +5307,15 @@ multiclass avx512_cvt_fp_sae_scalar<bits<8> opc, string OpcodeStr, X86VectorVTIn
multiclass avx512_cvt_fp_rc_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
X86VectorVTInfo _Src, SDNode OpNodeRnd> {
defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
- (ins _Src.RC:$src1, _Src.RC:$src2, AVX512RC:$rc), OpcodeStr,
+ (ins _.RC:$src1, _Src.RC:$src2, AVX512RC:$rc), OpcodeStr,
"$rc, $src2, $src1", "$src1, $src2, $rc",
- (_.VT (OpNodeRnd (_Src.VT _Src.RC:$src1),
+ (_.VT (OpNodeRnd (_.VT _.RC:$src1),
(_Src.VT _Src.RC:$src2), (i32 imm:$rc)))>,
EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
EVEX_B, EVEX_RC;
}
-multiclass avx512_cvt_fp_scalar_sd2ss<bits<8> opc, string OpcodeStr, SDNode OpNode,
- SDNode OpNodeRnd, X86VectorVTInfo _src,
+multiclass avx512_cvt_fp_scalar_sd2ss<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ SDNode OpNodeRnd, X86VectorVTInfo _src,
X86VectorVTInfo _dst> {
let Predicates = [HasAVX512] in {
defm Z : avx512_cvt_fp_scalar<opc, OpcodeStr, _dst, _src, OpNode>,
@@ -5042,22 +5325,22 @@ multiclass avx512_cvt_fp_scalar_sd2ss<bits<8> opc, string OpcodeStr, SDNode OpNo
}
}
-multiclass avx512_cvt_fp_scalar_ss2sd<bits<8> opc, string OpcodeStr, SDNode OpNode,
- SDNode OpNodeRnd, X86VectorVTInfo _src,
+multiclass avx512_cvt_fp_scalar_ss2sd<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ SDNode OpNodeRnd, X86VectorVTInfo _src,
X86VectorVTInfo _dst> {
let Predicates = [HasAVX512] in {
defm Z : avx512_cvt_fp_scalar<opc, OpcodeStr, _dst, _src, OpNode>,
- avx512_cvt_fp_sae_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
+ avx512_cvt_fp_sae_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
EVEX_CD8<32, CD8VT1>, XS, EVEX_V512;
}
}
defm VCVTSD2SS : avx512_cvt_fp_scalar_sd2ss<0x5A, "vcvtsd2ss", X86fround,
X86froundRnd, f64x_info, f32x_info>;
-defm VCVTSS2SD : avx512_cvt_fp_scalar_ss2sd<0x5A, "vcvtss2sd", X86fpext,
+defm VCVTSS2SD : avx512_cvt_fp_scalar_ss2sd<0x5A, "vcvtss2sd", X86fpext,
X86fpextRnd,f32x_info, f64x_info >;
-def : Pat<(f64 (fextend FR32X:$src)),
- (COPY_TO_REGCLASS (VCVTSS2SDZrr (COPY_TO_REGCLASS FR32X:$src, VR128X),
+def : Pat<(f64 (fextend FR32X:$src)),
+ (COPY_TO_REGCLASS (VCVTSS2SDZrr (COPY_TO_REGCLASS FR32X:$src, VR128X),
(COPY_TO_REGCLASS FR32X:$src, VR128X)), VR128X)>,
Requires<[HasAVX512]>;
def : Pat<(f64 (fextend (loadf32 addr:$src))),
@@ -5069,12 +5352,12 @@ def : Pat<(f64 (extloadf32 addr:$src)),
Requires<[HasAVX512, OptForSize]>;
def : Pat<(f64 (extloadf32 addr:$src)),
- (COPY_TO_REGCLASS (VCVTSS2SDZrr (v4f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS (VCVTSS2SDZrr (v4f32 (IMPLICIT_DEF)),
(COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)), VR128X)>,
Requires<[HasAVX512, OptForSpeed]>;
-def : Pat<(f32 (fround FR64X:$src)),
- (COPY_TO_REGCLASS (VCVTSD2SSZrr (COPY_TO_REGCLASS FR64X:$src, VR128X),
+def : Pat<(f32 (fround FR64X:$src)),
+ (COPY_TO_REGCLASS (VCVTSD2SSZrr (COPY_TO_REGCLASS FR64X:$src, VR128X),
(COPY_TO_REGCLASS FR64X:$src, VR128X)), VR128X)>,
Requires<[HasAVX512]>;
//===----------------------------------------------------------------------===//
@@ -5097,7 +5380,7 @@ multiclass avx512_vcvt_fp<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(bitconvert (_Src.LdFrag addr:$src)))))>, EVEX;
defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _Src.MemOp:$src), OpcodeStr,
+ (ins _Src.ScalarMemOp:$src), OpcodeStr,
"${src}"##Broadcast, "${src}"##Broadcast,
(_.VT (OpNode (_Src.VT
(X86VBroadcast (_Src.ScalarLdFrag addr:$src)))
@@ -5405,59 +5688,59 @@ defm VCVTUDQ2PS : avx512_cvtdq2ps<0x7A, "vcvtudq2ps", uint_to_fp,
X86VUintToFpRnd>, XD,
EVEX_CD8<32, CD8VF>;
-defm VCVTPS2DQ : avx512_cvtps2dq<0x5B, "vcvtps2dq", X86cvtps2Int,
- X86cvtps2IntRnd>, PD, EVEX_CD8<32, CD8VF>;
+defm VCVTPS2DQ : avx512_cvtps2dq<0x5B, "vcvtps2dq", X86cvtp2Int,
+ X86cvtp2IntRnd>, PD, EVEX_CD8<32, CD8VF>;
-defm VCVTPD2DQ : avx512_cvtpd2dq<0xE6, "vcvtpd2dq", X86cvtpd2Int,
- X86cvtpd2IntRnd>, XD, VEX_W,
+defm VCVTPD2DQ : avx512_cvtpd2dq<0xE6, "vcvtpd2dq", X86cvtp2Int,
+ X86cvtp2IntRnd>, XD, VEX_W,
EVEX_CD8<64, CD8VF>;
-defm VCVTPS2UDQ : avx512_cvtps2dq<0x79, "vcvtps2udq", X86cvtps2UInt,
- X86cvtps2UIntRnd>,
+defm VCVTPS2UDQ : avx512_cvtps2dq<0x79, "vcvtps2udq", X86cvtp2UInt,
+ X86cvtp2UIntRnd>,
PS, EVEX_CD8<32, CD8VF>;
-defm VCVTPD2UDQ : avx512_cvtpd2dq<0x79, "vcvtpd2udq", X86cvtpd2UInt,
- X86cvtpd2UIntRnd>, VEX_W,
+defm VCVTPD2UDQ : avx512_cvtpd2dq<0x79, "vcvtpd2udq", X86cvtp2UInt,
+ X86cvtp2UIntRnd>, VEX_W,
PS, EVEX_CD8<64, CD8VF>;
-defm VCVTPD2QQ : avx512_cvtpd2qq<0x7B, "vcvtpd2qq", X86cvtpd2Int,
- X86cvtpd2IntRnd>, VEX_W,
+defm VCVTPD2QQ : avx512_cvtpd2qq<0x7B, "vcvtpd2qq", X86cvtp2Int,
+ X86cvtp2IntRnd>, VEX_W,
PD, EVEX_CD8<64, CD8VF>;
-defm VCVTPS2QQ : avx512_cvtps2qq<0x7B, "vcvtps2qq", X86cvtps2Int,
- X86cvtps2IntRnd>, PD, EVEX_CD8<32, CD8VH>;
+defm VCVTPS2QQ : avx512_cvtps2qq<0x7B, "vcvtps2qq", X86cvtp2Int,
+ X86cvtp2IntRnd>, PD, EVEX_CD8<32, CD8VH>;
-defm VCVTPD2UQQ : avx512_cvtpd2qq<0x79, "vcvtpd2uqq", X86cvtpd2UInt,
- X86cvtpd2UIntRnd>, VEX_W,
+defm VCVTPD2UQQ : avx512_cvtpd2qq<0x79, "vcvtpd2uqq", X86cvtp2UInt,
+ X86cvtp2UIntRnd>, VEX_W,
PD, EVEX_CD8<64, CD8VF>;
-defm VCVTPS2UQQ : avx512_cvtps2qq<0x79, "vcvtps2uqq", X86cvtps2UInt,
- X86cvtps2UIntRnd>, PD, EVEX_CD8<32, CD8VH>;
+defm VCVTPS2UQQ : avx512_cvtps2qq<0x79, "vcvtps2uqq", X86cvtp2UInt,
+ X86cvtp2UIntRnd>, PD, EVEX_CD8<32, CD8VH>;
defm VCVTTPD2QQ : avx512_cvttpd2qq<0x7A, "vcvttpd2qq", fp_to_sint,
- X86VFpToSlongRnd>, VEX_W,
+ X86VFpToSintRnd>, VEX_W,
PD, EVEX_CD8<64, CD8VF>;
defm VCVTTPS2QQ : avx512_cvttps2qq<0x7A, "vcvttps2qq", fp_to_sint,
- X86VFpToSlongRnd>, PD, EVEX_CD8<32, CD8VH>;
+ X86VFpToSintRnd>, PD, EVEX_CD8<32, CD8VH>;
defm VCVTTPD2UQQ : avx512_cvttpd2qq<0x78, "vcvttpd2uqq", fp_to_uint,
- X86VFpToUlongRnd>, VEX_W,
+ X86VFpToUintRnd>, VEX_W,
PD, EVEX_CD8<64, CD8VF>;
defm VCVTTPS2UQQ : avx512_cvttps2qq<0x78, "vcvttps2uqq", fp_to_uint,
- X86VFpToUlongRnd>, PD, EVEX_CD8<32, CD8VH>;
+ X86VFpToUintRnd>, PD, EVEX_CD8<32, CD8VH>;
defm VCVTQQ2PD : avx512_cvtqq2pd<0xE6, "vcvtqq2pd", sint_to_fp,
- X86VSlongToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
+ X86VSintToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
defm VCVTUQQ2PD : avx512_cvtqq2pd<0x7A, "vcvtuqq2pd", uint_to_fp,
- X86VUlongToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
+ X86VUintToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
defm VCVTQQ2PS : avx512_cvtqq2ps<0x5B, "vcvtqq2ps", sint_to_fp,
- X86VSlongToFpRnd>, VEX_W, PS, EVEX_CD8<64, CD8VF>;
+ X86VSintToFpRnd>, VEX_W, PS, EVEX_CD8<64, CD8VF>;
defm VCVTUQQ2PS : avx512_cvtqq2ps<0x7A, "vcvtuqq2ps", uint_to_fp,
- X86VUlongToFpRnd>, VEX_W, XD, EVEX_CD8<64, CD8VF>;
+ X86VUintToFpRnd>, VEX_W, XD, EVEX_CD8<64, CD8VF>;
let Predicates = [HasAVX512, NoVLX] in {
def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
@@ -5468,6 +5751,10 @@ def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
(EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
(v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
+def : Pat<(v4i32 (fp_to_uint (v4f64 VR256X:$src1))),
+ (EXTRACT_SUBREG (v8i32 (VCVTTPD2UDQZrr
+ (v8f64 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_xmm)>;
+
def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
(EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
@@ -5491,18 +5778,16 @@ let Predicates = [HasAVX512] in {
//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
-multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src,
+multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src,
X86MemOperand x86memop, PatFrag ld_frag> {
defm rr : AVX512_maskable<0x13, MRMSrcReg, _dest ,(outs _dest.RC:$dst), (ins _src.RC:$src),
"vcvtph2ps", "$src", "$src",
(X86cvtph2ps (_src.VT _src.RC:$src),
(i32 FROUND_CURRENT))>, T8PD;
- let hasSideEffects = 0, mayLoad = 1 in {
- defm rm : AVX512_maskable<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst), (ins x86memop:$src),
- "vcvtph2ps", "$src", "$src",
- (X86cvtph2ps (_src.VT (bitconvert (ld_frag addr:$src))),
- (i32 FROUND_CURRENT))>, T8PD;
- }
+ defm rm : AVX512_maskable<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst), (ins x86memop:$src),
+ "vcvtph2ps", "$src", "$src",
+ (X86cvtph2ps (_src.VT (bitconvert (ld_frag addr:$src))),
+ (i32 FROUND_CURRENT))>, T8PD;
}
multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
@@ -5515,44 +5800,45 @@ multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
let Predicates = [HasAVX512] in {
defm VCVTPH2PSZ : avx512_cvtph2ps<v16f32_info, v16i16x_info, f256mem, loadv4i64>,
- avx512_cvtph2ps_sae<v16f32_info, v16i16x_info>,
+ avx512_cvtph2ps_sae<v16f32_info, v16i16x_info>,
EVEX, EVEX_V512, EVEX_CD8<32, CD8VH>;
let Predicates = [HasVLX] in {
- defm VCVTPH2PSZ256 : avx512_cvtph2ps<v8f32x_info, v8i16x_info, f128mem,
+ defm VCVTPH2PSZ256 : avx512_cvtph2ps<v8f32x_info, v8i16x_info, f128mem,
loadv2i64>,EVEX, EVEX_V256, EVEX_CD8<32, CD8VH>;
defm VCVTPH2PSZ128 : avx512_cvtph2ps<v4f32x_info, v8i16x_info, f64mem,
loadv2i64>, EVEX, EVEX_V128, EVEX_CD8<32, CD8VH>;
}
}
-multiclass avx512_cvtps2ph<X86VectorVTInfo _dest, X86VectorVTInfo _src,
+multiclass avx512_cvtps2ph<X86VectorVTInfo _dest, X86VectorVTInfo _src,
X86MemOperand x86memop> {
defm rr : AVX512_maskable<0x1D, MRMDestReg, _dest ,(outs _dest.RC:$dst),
- (ins _src.RC:$src1, i32u8imm:$src2),
- "vcvtps2ph", "$src2, $src1", "$src1, $src2",
+ (ins _src.RC:$src1, i32u8imm:$src2),
+ "vcvtps2ph", "$src2, $src1", "$src1, $src2",
(X86cvtps2ph (_src.VT _src.RC:$src1),
- (i32 imm:$src2),
- (i32 FROUND_CURRENT))>, AVX512AIi8Base;
- let hasSideEffects = 0, mayStore = 1 in {
- def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
- (ins x86memop:$dst, _src.RC:$src1, i32u8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(store (_dest.VT (X86cvtps2ph (_src.VT _src.RC:$src1),
- (i32 imm:$src2), (i32 FROUND_CURRENT) )),
- addr:$dst)]>;
- def mrk : AVX512AIi8<0x1D, MRMDestMem, (outs),
- (ins x86memop:$dst, _dest.KRCWM:$mask, _src.RC:$src1, i32u8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- []>, EVEX_K;
- }
+ (i32 imm:$src2),
+ (i32 FROUND_CURRENT)),
+ NoItinerary, 0, X86select>, AVX512AIi8Base;
+ def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
+ (ins x86memop:$dst, _src.RC:$src1, i32u8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(store (_dest.VT (X86cvtps2ph (_src.VT _src.RC:$src1),
+ (i32 imm:$src2), (i32 FROUND_CURRENT) )),
+ addr:$dst)]>;
+ let hasSideEffects = 0, mayStore = 1 in
+ def mrk : AVX512AIi8<0x1D, MRMDestMem, (outs),
+ (ins x86memop:$dst, _dest.KRCWM:$mask, _src.RC:$src1, i32u8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
+ []>, EVEX_K;
}
multiclass avx512_cvtps2ph_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
defm rb : AVX512_maskable<0x1D, MRMDestReg, _dest ,(outs _dest.RC:$dst),
- (ins _src.RC:$src1, i32u8imm:$src2),
- "vcvtps2ph", "$src2, {sae}, $src1", "$src1, $src2, {sae}",
+ (ins _src.RC:$src1, i32u8imm:$src2),
+ "vcvtps2ph", "$src2, {sae}, $src1", "$src1, {sae}, $src2",
(X86cvtps2ph (_src.VT _src.RC:$src1),
- (i32 imm:$src2),
- (i32 FROUND_NO_EXC))>, EVEX_B, AVX512AIi8Base;
+ (i32 imm:$src2),
+ (i32 FROUND_NO_EXC)),
+ NoItinerary, 0, X86select>, EVEX_B, AVX512AIi8Base;
}
let Predicates = [HasAVX512] in {
defm VCVTPS2PHZ : avx512_cvtps2ph<v16i16x_info, v16f32_info, f256mem>,
@@ -5571,7 +5857,7 @@ multiclass avx512_ord_cmp_sae<bits<8> opc, X86VectorVTInfo _, SDNode OpNode,
string OpcodeStr> {
def rb: AVX512<opc, MRMSrcReg, (outs), (ins _.RC:$src1, _.RC:$src2),
!strconcat(OpcodeStr, "\t{{sae}, $src2, $src1|$src1, $src2, {sae}}"),
- [(set EFLAGS, (OpNode (_.VT _.RC:$src1), _.RC:$src2,
+ [(set EFLAGS, (OpNode (_.VT _.RC:$src1), _.RC:$src2,
(i32 FROUND_NO_EXC)))],
IIC_SSE_COMIS_RR>, EVEX, EVEX_B, VEX_LIG, EVEX_V128,
Sched<[WriteFAdd]>;
@@ -5623,18 +5909,16 @@ let Defs = [EFLAGS], Predicates = [HasAVX512] in {
/// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let hasSideEffects = 0, AddedComplexity = 20 , Predicates = [HasAVX512] in {
+ let AddedComplexity = 20 , Predicates = [HasAVX512] in {
defm rr : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>, EVEX_4V;
- let mayLoad = 1 in {
defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(OpNode (_.VT _.RC:$src1),
(_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))))>, EVEX_4V;
- }
}
}
@@ -5653,18 +5937,16 @@ multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src), OpcodeStr, "$src", "$src",
(_.FloatVT (OpNode _.RC:$src))>, EVEX, T8PD;
- let mayLoad = 1 in {
- defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
- (OpNode (_.FloatVT
- (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD;
- defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src), OpcodeStr,
- "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
- (OpNode (_.FloatVT
- (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
- EVEX, T8PD, EVEX_B;
- }
+ defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
+ (OpNode (_.FloatVT
+ (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD;
+ defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src), OpcodeStr,
+ "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
+ (OpNode (_.FloatVT
+ (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
+ EVEX, T8PD, EVEX_B;
}
multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode> {
@@ -5710,7 +5992,7 @@ multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(i32 FROUND_NO_EXC))>, EVEX_B;
defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(OpNode (_.VT _.RC:$src1),
(_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
@@ -5724,7 +6006,7 @@ multiclass avx512_eri_s<bits<8> opc, string OpcodeStr, SDNode OpNode> {
EVEX_CD8<64, CD8VT1>, VEX_W;
}
-let hasSideEffects = 0, Predicates = [HasERI] in {
+let Predicates = [HasERI] in {
defm VRCP28 : avx512_eri_s<0xCB, "vrcp28", X86rcp28s>, T8PD, EVEX_4V;
defm VRSQRT28 : avx512_eri_s<0xCD, "vrsqrt28", X86rsqrt28s>, T8PD, EVEX_4V;
}
@@ -5746,7 +6028,7 @@ multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(i32 FROUND_CURRENT))>;
defm mb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src), OpcodeStr,
+ (ins _.ScalarMemOp:$src), OpcodeStr,
"${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
(OpNode (_.FloatVT
(X86VBroadcast (_.ScalarLdFrag addr:$src))),
@@ -5783,7 +6065,7 @@ multiclass avx512_fp_unaryop_packed<bits<8> opc, string OpcodeStr,
EVEX_V256, VEX_W, T8PD, EVEX_CD8<64, CD8VF>;
}
}
-let Predicates = [HasERI], hasSideEffects = 0 in {
+let Predicates = [HasERI] in {
defm VRSQRT28 : avx512_eri<0xCC, "vrsqrt28", X86rsqrt28>, EVEX;
defm VRCP28 : avx512_eri<0xCA, "vrcp28", X86rcp28>, EVEX;
@@ -5805,19 +6087,17 @@ multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr,
defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src), OpcodeStr, "$src", "$src",
(_.FloatVT (OpNode _.RC:$src))>, EVEX;
- let mayLoad = 1 in {
- defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
- (OpNode (_.FloatVT
- (bitconvert (_.LdFrag addr:$src))))>, EVEX;
+ defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
+ (OpNode (_.FloatVT
+ (bitconvert (_.LdFrag addr:$src))))>, EVEX;
- defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src), OpcodeStr,
- "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
- (OpNode (_.FloatVT
- (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
- EVEX, EVEX_B;
- }
+ defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src), OpcodeStr,
+ "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
+ (OpNode (_.FloatVT
+ (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
+ EVEX, EVEX_B;
}
multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
@@ -5862,14 +6142,13 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(OpNodeRnd (_.VT _.RC:$src1),
(_.VT _.RC:$src2),
(i32 FROUND_CURRENT))>;
- let mayLoad = 1 in
- defm m_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (OpNodeRnd (_.VT _.RC:$src1),
- (_.VT (scalar_to_vector
- (_.ScalarLdFrag addr:$src2))),
- (i32 FROUND_CURRENT))>;
+ defm m_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (OpNodeRnd (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector
+ (_.ScalarLdFrag addr:$src2))),
+ (i32 FROUND_CURRENT))>;
defm rb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr,
@@ -5879,7 +6158,7 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(i32 imm:$rc))>,
EVEX_B, EVEX_RC;
- let isCodeGenOnly = 1 in {
+ let isCodeGenOnly = 1, hasSideEffects = 0 in {
def r : I<opc, MRMSrcReg, (outs _.FRC:$dst),
(ins _.FRC:$src1, _.FRC:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>;
@@ -5940,9 +6219,9 @@ avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
(_.VT (X86RndScales (_.VT _.RC:$src1), (_.VT _.RC:$src2),
(i32 imm:$src3), (i32 FROUND_NO_EXC)))>, EVEX_B;
- let mayLoad = 1 in
defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3), OpcodeStr,
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
+ OpcodeStr,
"$src3, $src2, $src1", "$src1, $src2, $src3",
(_.VT (X86RndScales (_.VT _.RC:$src1),
(_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
@@ -6022,7 +6301,7 @@ multiclass avx512_trunc_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
DestInfo.KRCWM:$mask ,
SrcInfo.RC:$src1)>;
- let mayStore = 1 in {
+ let mayStore = 1, mayLoad = 1, hasSideEffects = 0 in {
def mr : AVX512XS8I<opc, MRMDestMem, (outs),
(ins x86memop:$dst, SrcInfo.RC:$src),
OpcodeStr # "\t{$src, $dst|$dst, $src}",
@@ -6032,7 +6311,7 @@ multiclass avx512_trunc_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins x86memop:$dst, SrcInfo.KRCWM:$mask, SrcInfo.RC:$src),
OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
[]>, EVEX, EVEX_K;
- }//mayStore = 1
+ }//mayStore = 1, mayLoad = 1, hasSideEffects = 0
}
multiclass avx512_trunc_mr_lowering<X86VectorVTInfo SrcInfo,
@@ -6223,23 +6502,21 @@ def: Pat<(v16i8 (X86vtrunc (v16i16 VR256X:$src))),
}
multiclass avx512_extend_common<bits<8> opc, string OpcodeStr,
- X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo,
- X86MemOperand x86memop, PatFrag LdFrag, SDNode OpNode>{
-
+ X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo,
+ X86MemOperand x86memop, PatFrag LdFrag, SDPatternOperator OpNode>{
defm rr : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
(ins SrcInfo.RC:$src), OpcodeStr ,"$src", "$src",
(DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src)))>,
EVEX;
- let mayLoad = 1 in {
- defm rm : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
- (ins x86memop:$src), OpcodeStr ,"$src", "$src",
- (DestInfo.VT (LdFrag addr:$src))>,
- EVEX;
- }
+ defm rm : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
+ (ins x86memop:$src), OpcodeStr ,"$src", "$src",
+ (DestInfo.VT (LdFrag addr:$src))>,
+ EVEX;
}
-multiclass avx512_extend_BW<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_BW<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
let Predicates = [HasVLX, HasBWI] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v8i16x_info,
@@ -6257,7 +6534,8 @@ multiclass avx512_extend_BW<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_BD<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_BD<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v4i32x_info,
@@ -6275,7 +6553,8 @@ multiclass avx512_extend_BD<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_BQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_BQ<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
@@ -6293,7 +6572,8 @@ multiclass avx512_extend_BQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_WD<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_WD<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi16")> {
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v4i32x_info,
@@ -6311,7 +6591,8 @@ multiclass avx512_extend_WD<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_WQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_WQ<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi16")> {
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
@@ -6329,7 +6610,8 @@ multiclass avx512_extend_WQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_DQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_DQ<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi32")> {
let Predicates = [HasVLX, HasAVX512] in {
@@ -6355,7 +6637,6 @@ defm VPMOVZXWD : avx512_extend_WD<0x33, "vpmovzxwd", X86vzext, "z">;
defm VPMOVZXWQ : avx512_extend_WQ<0x34, "vpmovzxwq", X86vzext, "z">;
defm VPMOVZXDQ : avx512_extend_DQ<0x35, "vpmovzxdq", X86vzext, "z">;
-
defm VPMOVSXBW: avx512_extend_BW<0x20, "vpmovsxbw", X86vsext, "s">;
defm VPMOVSXBD: avx512_extend_BD<0x21, "vpmovsxbd", X86vsext, "s">;
defm VPMOVSXBQ: avx512_extend_BQ<0x22, "vpmovsxbq", X86vsext, "s">;
@@ -6363,6 +6644,47 @@ defm VPMOVSXWD: avx512_extend_WD<0x23, "vpmovsxwd", X86vsext, "s">;
defm VPMOVSXWQ: avx512_extend_WQ<0x24, "vpmovsxwq", X86vsext, "s">;
defm VPMOVSXDQ: avx512_extend_DQ<0x25, "vpmovsxdq", X86vsext, "s">;
+// EXTLOAD patterns, implemented using vpmovz
+multiclass avx512_ext_lowering<string InstrStr, X86VectorVTInfo To,
+ X86VectorVTInfo From, PatFrag LdFrag> {
+ def : Pat<(To.VT (LdFrag addr:$src)),
+ (!cast<Instruction>("VPMOVZX"#InstrStr#"rm") addr:$src)>;
+ def : Pat<(To.VT (vselect To.KRCWM:$mask, (LdFrag addr:$src), To.RC:$src0)),
+ (!cast<Instruction>("VPMOVZX"#InstrStr#"rmk") To.RC:$src0,
+ To.KRC:$mask, addr:$src)>;
+ def : Pat<(To.VT (vselect To.KRCWM:$mask, (LdFrag addr:$src),
+ To.ImmAllZerosV)),
+ (!cast<Instruction>("VPMOVZX"#InstrStr#"rmkz") To.KRC:$mask,
+ addr:$src)>;
+}
+
+let Predicates = [HasVLX, HasBWI] in {
+ defm : avx512_ext_lowering<"BWZ128", v8i16x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BWZ256", v16i16x_info, v16i8x_info, extloadvi8>;
+}
+let Predicates = [HasBWI] in {
+ defm : avx512_ext_lowering<"BWZ", v32i16_info, v32i8x_info, extloadvi8>;
+}
+let Predicates = [HasVLX, HasAVX512] in {
+ defm : avx512_ext_lowering<"BDZ128", v4i32x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BDZ256", v8i32x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BQZ128", v2i64x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BQZ256", v4i64x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"WDZ128", v4i32x_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"WDZ256", v8i32x_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"WQZ128", v2i64x_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"WQZ256", v4i64x_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"DQZ128", v2i64x_info, v4i32x_info, extloadvi32>;
+ defm : avx512_ext_lowering<"DQZ256", v4i64x_info, v4i32x_info, extloadvi32>;
+}
+let Predicates = [HasAVX512] in {
+ defm : avx512_ext_lowering<"BDZ", v16i32_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BQZ", v8i64_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"WDZ", v16i32_info, v16i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"WQZ", v8i64_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"DQZ", v8i64_info, v8i32x_info, extloadvi32>;
+}
+
//===----------------------------------------------------------------------===//
// GATHER - SCATTER Operations
@@ -6383,34 +6705,34 @@ multiclass avx512_gather<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
multiclass avx512_gather_q_pd<bits<8> dopc, bits<8> qopc,
AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512,
- vy32xmem, mgatherv8i32>, EVEX_V512, VEX_W;
+ vy512mem, mgatherv8i32>, EVEX_V512, VEX_W;
defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info512,
- vz64mem, mgatherv8i64>, EVEX_V512, VEX_W;
+ vz512mem, mgatherv8i64>, EVEX_V512, VEX_W;
let Predicates = [HasVLX] in {
defm NAME##D##SUFF##Z256: avx512_gather<dopc, OpcodeStr##"d", _.info256,
- vx32xmem, mgatherv4i32>, EVEX_V256, VEX_W;
+ vx256xmem, mgatherv4i32>, EVEX_V256, VEX_W;
defm NAME##Q##SUFF##Z256: avx512_gather<qopc, OpcodeStr##"q", _.info256,
- vy64xmem, mgatherv4i64>, EVEX_V256, VEX_W;
+ vy256xmem, mgatherv4i64>, EVEX_V256, VEX_W;
defm NAME##D##SUFF##Z128: avx512_gather<dopc, OpcodeStr##"d", _.info128,
- vx32xmem, mgatherv4i32>, EVEX_V128, VEX_W;
+ vx128xmem, mgatherv4i32>, EVEX_V128, VEX_W;
defm NAME##Q##SUFF##Z128: avx512_gather<qopc, OpcodeStr##"q", _.info128,
- vx64xmem, mgatherv2i64>, EVEX_V128, VEX_W;
+ vx128xmem, mgatherv2i64>, EVEX_V128, VEX_W;
}
}
multiclass avx512_gather_d_ps<bits<8> dopc, bits<8> qopc,
AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
- defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512, vz32mem,
+ defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512, vz512mem,
mgatherv16i32>, EVEX_V512;
- defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info256, vz64mem,
+ defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info256, vz512mem,
mgatherv8i64>, EVEX_V512;
let Predicates = [HasVLX] in {
defm NAME##D##SUFF##Z256: avx512_gather<dopc, OpcodeStr##"d", _.info256,
- vy32xmem, mgatherv8i32>, EVEX_V256;
+ vy256xmem, mgatherv8i32>, EVEX_V256;
defm NAME##Q##SUFF##Z256: avx512_gather<qopc, OpcodeStr##"q", _.info128,
- vy64xmem, mgatherv4i64>, EVEX_V256;
+ vy128xmem, mgatherv4i64>, EVEX_V256;
defm NAME##D##SUFF##Z128: avx512_gather<dopc, OpcodeStr##"d", _.info128,
- vx32xmem, mgatherv4i32>, EVEX_V128;
+ vx128xmem, mgatherv4i32>, EVEX_V128;
defm NAME##Q##SUFF##Z128: avx512_gather<qopc, OpcodeStr##"q", _.info128,
vx64xmem, mgatherv2i64>, EVEX_V128;
}
@@ -6440,34 +6762,34 @@ let mayStore = 1, Constraints = "$mask = $mask_wb", ExeDomain = _.ExeDomain in
multiclass avx512_scatter_q_pd<bits<8> dopc, bits<8> qopc,
AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512,
- vy32xmem, mscatterv8i32>, EVEX_V512, VEX_W;
+ vy512mem, mscatterv8i32>, EVEX_V512, VEX_W;
defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info512,
- vz64mem, mscatterv8i64>, EVEX_V512, VEX_W;
+ vz512mem, mscatterv8i64>, EVEX_V512, VEX_W;
let Predicates = [HasVLX] in {
defm NAME##D##SUFF##Z256: avx512_scatter<dopc, OpcodeStr##"d", _.info256,
- vx32xmem, mscatterv4i32>, EVEX_V256, VEX_W;
+ vx256xmem, mscatterv4i32>, EVEX_V256, VEX_W;
defm NAME##Q##SUFF##Z256: avx512_scatter<qopc, OpcodeStr##"q", _.info256,
- vy64xmem, mscatterv4i64>, EVEX_V256, VEX_W;
+ vy256xmem, mscatterv4i64>, EVEX_V256, VEX_W;
defm NAME##D##SUFF##Z128: avx512_scatter<dopc, OpcodeStr##"d", _.info128,
- vx32xmem, mscatterv4i32>, EVEX_V128, VEX_W;
+ vx128xmem, mscatterv4i32>, EVEX_V128, VEX_W;
defm NAME##Q##SUFF##Z128: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
- vx64xmem, mscatterv2i64>, EVEX_V128, VEX_W;
+ vx128xmem, mscatterv2i64>, EVEX_V128, VEX_W;
}
}
multiclass avx512_scatter_d_ps<bits<8> dopc, bits<8> qopc,
AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
- defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512, vz32mem,
+ defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512, vz512mem,
mscatterv16i32>, EVEX_V512;
- defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info256, vz64mem,
+ defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info256, vz512mem,
mscatterv8i64>, EVEX_V512;
let Predicates = [HasVLX] in {
defm NAME##D##SUFF##Z256: avx512_scatter<dopc, OpcodeStr##"d", _.info256,
- vy32xmem, mscatterv8i32>, EVEX_V256;
+ vy256xmem, mscatterv8i32>, EVEX_V256;
defm NAME##Q##SUFF##Z256: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
- vy64xmem, mscatterv4i64>, EVEX_V256;
+ vy128xmem, mscatterv4i64>, EVEX_V256;
defm NAME##D##SUFF##Z128: avx512_scatter<dopc, OpcodeStr##"d", _.info128,
- vx32xmem, mscatterv4i32>, EVEX_V128;
+ vx128xmem, mscatterv4i32>, EVEX_V128;
defm NAME##Q##SUFF##Z128: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
vx64xmem, mscatterv2i64>, EVEX_V128;
}
@@ -6489,79 +6811,57 @@ multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeSt
}
defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
- VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+ VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
- VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
- VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+ VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
- VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
- VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+ VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
- VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
- VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+ VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
- VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
- VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+ VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
- VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
- VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+ VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
- VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
- VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+ VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
- VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
- VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+ VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
- VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
// Helper fragments to match sext vXi1 to vXiY.
def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
def v8i1sextv8i64 : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
-def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
-def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
-def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
-
-def : Pat<(store VK1:$src, addr:$dst),
- (MOV8mr addr:$dst,
- (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)),
- sub_8bit))>, Requires<[HasAVX512, NoDQI]>;
-
-def : Pat<(store VK8:$src, addr:$dst),
- (MOV8mr addr:$dst,
- (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
- sub_8bit))>, Requires<[HasAVX512, NoDQI]>;
-
-def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
- (truncstore node:$val, node:$ptr), [{
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
-}]>;
-
-def : Pat<(truncstorei1 GR8:$src, addr:$dst),
- (MOV8mr addr:$dst, GR8:$src)>;
-
multiclass cvt_by_vec_width<bits<8> opc, X86VectorVTInfo Vec, string OpcodeStr > {
def rr : AVX512XS8I<opc, MRMSrcReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
!strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
@@ -6593,22 +6893,38 @@ multiclass avx512_convert_mask_to_vector<string OpcodeStr> {
defm VPMOVM2 : avx512_convert_mask_to_vector<"vpmovm2">;
multiclass convert_vector_to_mask_common<bits<8> opc, X86VectorVTInfo _, string OpcodeStr > {
-def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set _.KRC:$dst, (X86cvt2mask (_.VT _.RC:$src)))]>, EVEX;
+ def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set _.KRC:$dst, (X86cvt2mask (_.VT _.RC:$src)))]>, EVEX;
+}
+
+// Use the 512-bit version to implement 128/256-bit in case of NoVLX.
+multiclass convert_vector_to_mask_lowering<X86VectorVTInfo ExtendInfo,
+ X86VectorVTInfo _> {
+
+ def : Pat<(_.KVT (X86cvt2mask (_.VT _.RC:$src))),
+ (_.KVT (COPY_TO_REGCLASS
+ (!cast<Instruction>(NAME#"Zrr")
+ (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
+ _.RC:$src, _.SubRegIdx)),
+ _.KRC))>;
}
multiclass avx512_convert_vector_to_mask<bits<8> opc, string OpcodeStr,
- AVX512VLVectorVTInfo VTInfo, Predicate prd> {
-let Predicates = [prd] in
- defm Z : convert_vector_to_mask_common <opc, VTInfo.info512, OpcodeStr>,
- EVEX_V512;
+ AVX512VLVectorVTInfo VTInfo, Predicate prd> {
+ let Predicates = [prd] in
+ defm Z : convert_vector_to_mask_common <opc, VTInfo.info512, OpcodeStr>,
+ EVEX_V512;
let Predicates = [prd, HasVLX] in {
defm Z256 : convert_vector_to_mask_common<opc, VTInfo.info256, OpcodeStr>,
- EVEX_V256;
+ EVEX_V256;
defm Z128 : convert_vector_to_mask_common<opc, VTInfo.info128, OpcodeStr>,
- EVEX_V128;
+ EVEX_V128;
+ }
+ let Predicates = [prd, NoVLX] in {
+ defm Z256_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info256>;
+ defm Z128_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info128>;
}
}
@@ -6631,7 +6947,7 @@ multiclass compress_by_vec_width<bits<8> opc, X86VectorVTInfo _,
(ins _.RC:$src1), OpcodeStr, "$src1", "$src1",
(_.VT (X86compress _.RC:$src1))>, AVX5128IBase;
- let mayStore = 1 in {
+ let mayStore = 1, hasSideEffects = 0 in
def mr : AVX5128I<opc, MRMDestMem, (outs),
(ins _.MemOp:$dst, _.RC:$src),
OpcodeStr # "\t{$src, $dst|$dst, $src}",
@@ -6644,7 +6960,6 @@ multiclass compress_by_vec_width<bits<8> opc, X86VectorVTInfo _,
(_.VT (X86compress _.RC:$src)), _.ImmAllZerosV)),
addr:$dst)]>,
EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
- }
}
multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
@@ -6673,7 +6988,6 @@ multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _,
(ins _.RC:$src1), OpcodeStr, "$src1", "$src1",
(_.VT (X86expand _.RC:$src1))>, AVX5128IBase;
- let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.MemOp:$src1), OpcodeStr, "$src1", "$src1",
(_.VT (X86expand (_.VT (bitconvert
@@ -6708,25 +7022,23 @@ multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNo
X86VectorVTInfo _>{
defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix, "$src2, $src1", "$src2, $src2",
- (OpNode (_.VT _.RC:$src1),
- (i32 imm:$src2),
- (i32 FROUND_CURRENT))>;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2",
- (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (OpNode (_.VT _.RC:$src1),
(i32 imm:$src2),
(i32 FROUND_CURRENT))>;
- defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix, "$src2, ${src1}"##_.BroadcastStr,
- "${src1}"##_.BroadcastStr##", $src2",
- (OpNode (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src1))),
- (i32 imm:$src2),
- (i32 FROUND_CURRENT))>, EVEX_B;
- }
+ defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.MemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2",
+ (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (i32 imm:$src2),
+ (i32 FROUND_CURRENT))>;
+ defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix, "$src2, ${src1}"##_.BroadcastStr,
+ "${src1}"##_.BroadcastStr##", $src2",
+ (OpNode (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src1))),
+ (i32 imm:$src2),
+ (i32 FROUND_CURRENT))>, EVEX_B;
}
//handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm),{sae}
@@ -6769,23 +7081,21 @@ multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT _.RC:$src2),
(i32 imm:$src3),
(i32 FROUND_CURRENT))>;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
- OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (bitconvert (_.LdFrag addr:$src2))),
- (i32 imm:$src3),
- (i32 FROUND_CURRENT))>;
- defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
- OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr##", $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
- (i32 imm:$src3),
- (i32 FROUND_CURRENT))>, EVEX_B;
- }
+ defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
+ OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (bitconvert (_.LdFrag addr:$src2))),
+ (i32 imm:$src3),
+ (i32 FROUND_CURRENT))>;
+ defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
+ OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr##", $src3",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
+ (i32 imm:$src3),
+ (i32 FROUND_CURRENT))>, EVEX_B;
}
//handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
@@ -6799,14 +7109,13 @@ multiclass avx512_3Op_rm_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
(DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
(SrcInfo.VT SrcInfo.RC:$src2),
(i8 imm:$src3)))>;
- let mayLoad = 1 in
- defm rmi : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
- (ins SrcInfo.RC:$src1, SrcInfo.MemOp:$src2, u8imm:$src3),
- OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
- (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
- (SrcInfo.VT (bitconvert
- (SrcInfo.LdFrag addr:$src2))),
- (i8 imm:$src3)))>;
+ defm rmi : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.RC:$src1, SrcInfo.MemOp:$src2, u8imm:$src3),
+ OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+ (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
+ (SrcInfo.VT (bitconvert
+ (SrcInfo.LdFrag addr:$src2))),
+ (i8 imm:$src3)))>;
}
//handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
@@ -6816,14 +7125,13 @@ multiclass avx512_3Op_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _>:
avx512_3Op_rm_imm8<opc, OpcodeStr, OpNode, _, _>{
- let mayLoad = 1 in
- defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
- OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr##", $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
- (i8 imm:$src3))>, EVEX_B;
+ defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
+ OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr##", $src3",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
+ (i8 imm:$src3))>, EVEX_B;
}
//handle scalar instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
@@ -6839,22 +7147,20 @@ multiclass avx512_fp_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT _.RC:$src2),
(i32 imm:$src3),
(i32 FROUND_CURRENT))>;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
- OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (scalar_to_vector
- (_.ScalarLdFrag addr:$src2))),
- (i32 imm:$src3),
- (i32 FROUND_CURRENT))>;
+ defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
+ OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector
+ (_.ScalarLdFrag addr:$src2))),
+ (i32 imm:$src3),
+ (i32 FROUND_CURRENT))>;
- let isAsmParserOnly = 1 in {
- defm rmi_alt :AVX512_maskable_in_asm<opc, MRMSrcMem, _, (outs _.FRC:$dst),
- (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
- OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
- []>;
- }
+ let isAsmParserOnly = 1, mayLoad = 1, hasSideEffects = 0 in {
+ defm rmi_alt :AVX512_maskable_in_asm<opc, MRMSrcMem, _, (outs _.FRC:$dst),
+ (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
+ OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+ []>;
}
}
@@ -6940,19 +7246,6 @@ multiclass avx512_common_unary_fp_sae_packed_imm_all<string OpcodeStr,
opcPd, OpNode, prd>, EVEX_CD8<64, CD8VF>, VEX_W;
}
-defm VFIXUPIMMPD : avx512_common_fp_sae_packed_imm<"vfixupimmpd",
- avx512vl_f64_info, 0x54, X86VFixupimm, HasAVX512>,
- AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
-defm VFIXUPIMMPS : avx512_common_fp_sae_packed_imm<"vfixupimmps",
- avx512vl_f32_info, 0x54, X86VFixupimm, HasAVX512>,
- AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
-
-defm VFIXUPIMMSD: avx512_common_fp_sae_scalar_imm<"vfixupimmsd", f64x_info,
- 0x55, X86VFixupimm, HasAVX512>,
- AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
-defm VFIXUPIMMSS: avx512_common_fp_sae_scalar_imm<"vfixupimmss", f32x_info,
- 0x55, X86VFixupimm, HasAVX512>,
- AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
defm VREDUCE : avx512_common_unary_fp_sae_packed_imm_all<"vreduce", 0x56, 0x56,
X86VReduce, HasDQI>, AVX512AIi8Base, EVEX;
@@ -7043,7 +7336,7 @@ defm VALIGND: avx512_valign<"valignd", avx512vl_i32_info>,
defm VALIGNQ: avx512_valign<"valignq", avx512vl_i64_info>,
EVEX_CD8<64, CD8VF>, VEX_W;
-multiclass avx512_vpalign_lowering<X86VectorVTInfo _ , list<Predicate> p>{
+multiclass avx512_vpalignr_lowering<X86VectorVTInfo _ , list<Predicate> p>{
let Predicates = p in
def NAME#_.VTName#rri:
Pat<(_.VT (X86PAlignr _.RC:$src1, _.RC:$src2, (i8 imm:$imm))),
@@ -7051,18 +7344,18 @@ multiclass avx512_vpalign_lowering<X86VectorVTInfo _ , list<Predicate> p>{
_.RC:$src1, _.RC:$src2, imm:$imm)>;
}
-multiclass avx512_vpalign_lowering_common<AVX512VLVectorVTInfo _>:
- avx512_vpalign_lowering<_.info512, [HasBWI]>,
- avx512_vpalign_lowering<_.info128, [HasBWI, HasVLX]>,
- avx512_vpalign_lowering<_.info256, [HasBWI, HasVLX]>;
+multiclass avx512_vpalignr_lowering_common<AVX512VLVectorVTInfo _>:
+ avx512_vpalignr_lowering<_.info512, [HasBWI]>,
+ avx512_vpalignr_lowering<_.info128, [HasBWI, HasVLX]>,
+ avx512_vpalignr_lowering<_.info256, [HasBWI, HasVLX]>;
-defm VPALIGN: avx512_common_3Op_rm_imm8<0x0F, X86PAlignr, "vpalignr" ,
+defm VPALIGNR: avx512_common_3Op_rm_imm8<0x0F, X86PAlignr, "vpalignr" ,
avx512vl_i8_info, avx512vl_i8_info>,
- avx512_vpalign_lowering_common<avx512vl_i16_info>,
- avx512_vpalign_lowering_common<avx512vl_i32_info>,
- avx512_vpalign_lowering_common<avx512vl_f32_info>,
- avx512_vpalign_lowering_common<avx512vl_i64_info>,
- avx512_vpalign_lowering_common<avx512vl_f64_info>,
+ avx512_vpalignr_lowering_common<avx512vl_i16_info>,
+ avx512_vpalignr_lowering_common<avx512vl_i32_info>,
+ avx512_vpalignr_lowering_common<avx512vl_f32_info>,
+ avx512_vpalignr_lowering_common<avx512vl_i64_info>,
+ avx512_vpalignr_lowering_common<avx512vl_f64_info>,
EVEX_CD8<8, CD8VF>;
defm VDBPSADBW: avx512_common_3Op_rm_imm8<0x42, X86dbpsadbw, "vdbpsadbw" ,
@@ -7075,25 +7368,23 @@ multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
"$src1", "$src1",
(_.VT (OpNode _.RC:$src1))>, EVEX, AVX5128IBase;
- let mayLoad = 1 in
- defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src1), OpcodeStr,
- "$src1", "$src1",
- (_.VT (OpNode (bitconvert (_.LdFrag addr:$src1))))>,
- EVEX, AVX5128IBase, EVEX_CD8<_.EltSize, CD8VF>;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.MemOp:$src1), OpcodeStr,
+ "$src1", "$src1",
+ (_.VT (OpNode (bitconvert (_.LdFrag addr:$src1))))>,
+ EVEX, AVX5128IBase, EVEX_CD8<_.EltSize, CD8VF>;
}
multiclass avx512_unary_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> :
avx512_unary_rm<opc, OpcodeStr, OpNode, _> {
- let mayLoad = 1 in
- defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src1), OpcodeStr,
- "${src1}"##_.BroadcastStr,
- "${src1}"##_.BroadcastStr,
- (_.VT (OpNode (X86VBroadcast
- (_.ScalarLdFrag addr:$src1))))>,
- EVEX, AVX5128IBase, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
+ defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src1), OpcodeStr,
+ "${src1}"##_.BroadcastStr,
+ "${src1}"##_.BroadcastStr,
+ (_.VT (OpNode (X86VBroadcast
+ (_.ScalarLdFrag addr:$src1))))>,
+ EVEX, AVX5128IBase, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
}
multiclass avx512_unary_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -7185,12 +7476,11 @@ multiclass avx512_movddup_128<bits<8> opc, string OpcodeStr, SDNode OpNode,
defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src), OpcodeStr, "$src", "$src",
(_.VT (OpNode (_.VT _.RC:$src)))>, EVEX;
- let mayLoad = 1 in
- defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
- (_.VT (OpNode (_.VT (scalar_to_vector
- (_.ScalarLdFrag addr:$src)))))>,
- EVEX, EVEX_CD8<_.EltSize, CD8VH>;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
+ (_.VT (OpNode (_.VT (scalar_to_vector
+ (_.ScalarLdFrag addr:$src)))))>,
+ EVEX, EVEX_CD8<_.EltSize, CD8VH>;
}
multiclass avx512_movddup_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -7221,8 +7511,8 @@ def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
//===----------------------------------------------------------------------===//
// AVX-512 - Unpack Instructions
//===----------------------------------------------------------------------===//
-defm VUNPCKH : avx512_fp_binop_p<0x15, "vunpckh", X86Unpckh>;
-defm VUNPCKL : avx512_fp_binop_p<0x14, "vunpckl", X86Unpckl>;
+defm VUNPCKH : avx512_fp_binop_p<0x15, "vunpckh", X86Unpckh, HasAVX512>;
+defm VUNPCKL : avx512_fp_binop_p<0x14, "vunpckl", X86Unpckl, HasAVX512>;
defm VPUNPCKLBW : avx512_binop_rm_vl_b<0x60, "vpunpcklbw", X86Unpckl,
SSE_INTALU_ITINS_P, HasBWI>;
@@ -7248,14 +7538,13 @@ defm VPUNPCKHQDQ : avx512_binop_rm_vl_q<0x6D, "vpunpckhqdq", X86Unpckh,
multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let mayStore = 1 in
- def mr : AVX512Ii8<opc, MRMDestMem, (outs),
- (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
- OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(store (_.EltVT (trunc (assertzext (OpNode (_.VT _.RC:$src1),
- imm:$src2)))),
- addr:$dst)]>,
- EVEX, EVEX_CD8<_.EltSize, CD8VT1>;
+ def mr : AVX512Ii8<opc, MRMDestMem, (outs),
+ (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(store (_.EltVT (trunc (assertzext (OpNode (_.VT _.RC:$src1),
+ imm:$src2)))),
+ addr:$dst)]>,
+ EVEX, EVEX_CD8<_.EltSize, CD8VT1>;
}
multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
@@ -7280,6 +7569,7 @@ multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
(X86pextrw (_.VT _.RC:$src1), imm:$src2))]>,
EVEX, PD;
+ let hasSideEffects = 0 in
def rr_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
(ins _.RC:$src1, u8imm:$src2),
OpcodeStr#".s\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
@@ -7299,13 +7589,12 @@ multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
(extractelt (_.VT _.RC:$src1), imm:$src2))]>,
EVEX, TAPD;
- let mayStore = 1 in
- def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
- (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
- OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(store (extractelt (_.VT _.RC:$src1),
- imm:$src2),addr:$dst)]>,
- EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TAPD;
+ def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
+ (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(store (extractelt (_.VT _.RC:$src1),
+ imm:$src2),addr:$dst)]>,
+ EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TAPD;
}
}
@@ -7380,33 +7669,33 @@ multiclass avx512_shift_packed<bits<8> opc, SDNode OpNode, Format MRMr,
(outs _.RC:$dst), (ins _.RC:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst,(_.VT (OpNode _.RC:$src1, (i8 imm:$src2))))]>;
- let mayLoad = 1 in
- def rm : AVX512<opc, MRMm,
- (outs _.RC:$dst), (ins _.MemOp:$src1, u8imm:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set _.RC:$dst,(_.VT (OpNode
- (_.LdFrag addr:$src1), (i8 imm:$src2))))]>;
+ def rm : AVX512<opc, MRMm,
+ (outs _.RC:$dst), (ins _.MemOp:$src1, u8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set _.RC:$dst,(_.VT (OpNode
+ (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (i8 imm:$src2))))]>;
}
-multiclass avx512_shift_packed_all<bits<8> opc, SDNode OpNode, Format MRMr,
+multiclass avx512_shift_packed_all<bits<8> opc, SDNode OpNode, Format MRMr,
Format MRMm, string OpcodeStr, Predicate prd>{
let Predicates = [prd] in
- defm Z512 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
- OpcodeStr, v8i64_info>, EVEX_V512;
+ defm Z512 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
+ OpcodeStr, v64i8_info>, EVEX_V512;
let Predicates = [prd, HasVLX] in {
- defm Z256 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
- OpcodeStr, v4i64x_info>, EVEX_V256;
- defm Z128 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
- OpcodeStr, v2i64x_info>, EVEX_V128;
+ defm Z256 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
+ OpcodeStr, v32i8x_info>, EVEX_V256;
+ defm Z128 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
+ OpcodeStr, v16i8x_info>, EVEX_V128;
}
}
-defm VPSLLDQ : avx512_shift_packed_all<0x73, X86vshldq, MRM7r, MRM7m, "vpslldq",
+defm VPSLLDQ : avx512_shift_packed_all<0x73, X86vshldq, MRM7r, MRM7m, "vpslldq",
HasBWI>, AVX512PDIi8Base, EVEX_4V;
-defm VPSRLDQ : avx512_shift_packed_all<0x73, X86vshrdq, MRM3r, MRM3m, "vpsrldq",
+defm VPSRLDQ : avx512_shift_packed_all<0x73, X86vshrdq, MRM3r, MRM3m, "vpsrldq",
HasBWI>, AVX512PDIi8Base, EVEX_4V;
-multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
+multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
string OpcodeStr, X86VectorVTInfo _dst,
X86VectorVTInfo _src>{
def rr : AVX512BI<opc, MRMSrcReg,
@@ -7415,17 +7704,16 @@ multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
[(set _dst.RC:$dst,(_dst.VT
(OpNode (_src.VT _src.RC:$src1),
(_src.VT _src.RC:$src2))))]>;
- let mayLoad = 1 in
- def rm : AVX512BI<opc, MRMSrcMem,
- (outs _dst.RC:$dst), (ins _src.RC:$src1, _src.MemOp:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set _dst.RC:$dst,(_dst.VT
- (OpNode (_src.VT _src.RC:$src1),
- (_src.VT (bitconvert
- (_src.LdFrag addr:$src2))))))]>;
+ def rm : AVX512BI<opc, MRMSrcMem,
+ (outs _dst.RC:$dst), (ins _src.RC:$src1, _src.MemOp:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set _dst.RC:$dst,(_dst.VT
+ (OpNode (_src.VT _src.RC:$src1),
+ (_src.VT (bitconvert
+ (_src.LdFrag addr:$src2))))))]>;
}
-multiclass avx512_psadbw_packed_all<bits<8> opc, SDNode OpNode,
+multiclass avx512_psadbw_packed_all<bits<8> opc, SDNode OpNode,
string OpcodeStr, Predicate prd> {
let Predicates = [prd] in
defm Z512 : avx512_psadbw_packed<opc, OpNode, OpcodeStr, v8i64_info,
@@ -7438,7 +7726,7 @@ multiclass avx512_psadbw_packed_all<bits<8> opc, SDNode OpNode,
}
}
-defm VPSADBW : avx512_psadbw_packed_all<0xf6, X86psadbw, "vpsadbw",
+defm VPSADBW : avx512_psadbw_packed_all<0xf6, X86psadbw, "vpsadbw",
HasBWI>, EVEX_4V;
multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -7446,30 +7734,28 @@ multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode,
let Constraints = "$src1 = $dst" in {
defm rri : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.RC:$src3, u8imm:$src4),
- OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src3",
+ OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4",
(OpNode (_.VT _.RC:$src1),
(_.VT _.RC:$src2),
(_.VT _.RC:$src3),
(i8 imm:$src4))>, AVX512AIi8Base, EVEX_4V;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.MemOp:$src3, u8imm:$src4),
- OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT _.RC:$src2),
- (_.VT (bitconvert (_.LdFrag addr:$src3))),
- (i8 imm:$src4))>,
- AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
- defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.ScalarMemOp:$src3, u8imm:$src4),
- OpcodeStr, "$src4, ${src3}"##_.BroadcastStr##", $src2",
- "$src2, ${src3}"##_.BroadcastStr##", $src4",
- (OpNode (_.VT _.RC:$src1),
- (_.VT _.RC:$src2),
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
- (i8 imm:$src4))>, EVEX_B,
- AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
- }
+ defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3, u8imm:$src4),
+ OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.VT (bitconvert (_.LdFrag addr:$src3))),
+ (i8 imm:$src4))>,
+ AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
+ defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3, u8imm:$src4),
+ OpcodeStr, "$src4, ${src3}"##_.BroadcastStr##", $src2",
+ "$src2, ${src3}"##_.BroadcastStr##", $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
+ (i8 imm:$src4))>, EVEX_B,
+ AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
}// Constraints = "$src1 = $dst"
}
@@ -7485,3 +7771,109 @@ multiclass avx512_common_ternlog<string OpcodeStr, AVX512VLVectorVTInfo _>{
defm VPTERNLOGD : avx512_common_ternlog<"vpternlogd", avx512vl_i32_info>;
defm VPTERNLOGQ : avx512_common_ternlog<"vpternlogq", avx512vl_i64_info>, VEX_W;
+//===----------------------------------------------------------------------===//
+// AVX-512 - FixupImm
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_fixupimm_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _>{
+ let Constraints = "$src1 = $dst" in {
+ defm rri : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.IntVT _.RC:$src3),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>;
+ defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.IntVT (bitconvert (_.LdFrag addr:$src3))),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>;
+ defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, ${src3}"##_.BroadcastStr##", $src2",
+ "$src2, ${src3}"##_.BroadcastStr##", $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.IntVT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>, EVEX_B;
+ } // Constraints = "$src1 = $dst"
+}
+
+multiclass avx512_fixupimm_packed_sae<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, X86VectorVTInfo _>{
+let Constraints = "$src1 = $dst" in {
+ defm rrib : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2",
+ "$src2, $src3, {sae}, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.IntVT _.RC:$src3),
+ (i32 imm:$src4),
+ (i32 FROUND_NO_EXC))>, EVEX_B;
+ }
+}
+
+multiclass avx512_fixupimm_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _, X86VectorVTInfo _src3VT> {
+ let Constraints = "$src1 = $dst" , Predicates = [HasAVX512] in {
+ defm rri : AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_src3VT.VT _src3VT.RC:$src3),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>;
+
+ defm rrib : AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2",
+ "$src2, $src3, {sae}, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_src3VT.VT _src3VT.RC:$src3),
+ (i32 imm:$src4),
+ (i32 FROUND_NO_EXC))>, EVEX_B;
+ defm rmi : AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_src3VT.VT (scalar_to_vector
+ (_src3VT.ScalarLdFrag addr:$src3))),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>;
+ }
+}
+
+multiclass avx512_fixupimm_packed_all<AVX512VLVectorVTInfo _Vec>{
+ let Predicates = [HasAVX512] in
+ defm Z : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info512>,
+ avx512_fixupimm_packed_sae<0x54, "vfixupimm", X86VFixupimm, _Vec.info512>,
+ AVX512AIi8Base, EVEX_4V, EVEX_V512;
+ let Predicates = [HasAVX512, HasVLX] in {
+ defm Z128 : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info128>,
+ AVX512AIi8Base, EVEX_4V, EVEX_V128;
+ defm Z256 : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info256>,
+ AVX512AIi8Base, EVEX_4V, EVEX_V256;
+ }
+}
+
+defm VFIXUPIMMSS : avx512_fixupimm_scalar<0x55, "vfixupimm", X86VFixupimmScalar,
+ f32x_info, v4i32x_info>,
+ AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+defm VFIXUPIMMSD : avx512_fixupimm_scalar<0x55, "vfixupimm", X86VFixupimmScalar,
+ f64x_info, v2i64x_info>,
+ AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
+defm VFIXUPIMMPS : avx512_fixupimm_packed_all<avx512vl_f32_info>,
+ EVEX_CD8<32, CD8VF>;
+defm VFIXUPIMMPD : avx512_fixupimm_packed_all<avx512vl_f64_info>,
+ EVEX_CD8<64, CD8VF>, VEX_W;
diff --git a/lib/Target/X86/X86InstrBuilder.h b/lib/Target/X86/X86InstrBuilder.h
index 787f15bc628e5..bcea6fa803505 100644
--- a/lib/Target/X86/X86InstrBuilder.h
+++ b/lib/Target/X86/X86InstrBuilder.h
@@ -83,6 +83,34 @@ struct X86AddressMode {
}
};
+/// Compute the addressing mode from a machine instruction starting with the
+/// given operand.
+static inline X86AddressMode getAddressFromInstr(MachineInstr *MI,
+ unsigned Operand) {
+ X86AddressMode AM;
+ MachineOperand &Op = MI->getOperand(Operand);
+ if (Op.isReg()) {
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = Op.getReg();
+ } else {
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = Op.getIndex();
+ }
+ Op = MI->getOperand(Operand + 1);
+ if (Op.isImm())
+ AM.Scale = Op.getImm();
+ Op = MI->getOperand(Operand + 2);
+ if (Op.isImm())
+ AM.IndexReg = Op.getImm();
+ Op = MI->getOperand(Operand + 3);
+ if (Op.isGlobal()) {
+ AM.GV = Op.getGlobal();
+ } else {
+ AM.Disp = Op.getImm();
+ }
+ return AM;
+}
+
/// addDirectMem - This function is used to add a direct memory reference to the
/// current instruction -- that is, a dereference of an address in a register,
/// with no scale, index or displacement. An example is: DWORD PTR [EAX].
@@ -151,7 +179,7 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
MachineFunction &MF = *MI->getParent()->getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
const MCInstrDesc &MCID = MI->getDesc();
- unsigned Flags = 0;
+ auto Flags = MachineMemOperand::MONone;
if (MCID.mayLoad())
Flags |= MachineMemOperand::MOLoad;
if (MCID.mayStore())
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index c709c8aca9faf..925f4efb5aa9b 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -99,18 +99,6 @@ def VAARG_64 : I<0, Pseudo,
(X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
(implicit EFLAGS)]>;
-// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
-// targets. These calls are needed to probe the stack when allocating more than
-// 4k bytes in one go. Touching the stack at 4K increments is necessary to
-// ensure that the guard pages used by the OS virtual memory manager are
-// allocated in correct sequence.
-// The main point of having separate instruction are extra unmodelled effects
-// (compared to ordinary calls) like stack pointer change.
-
-let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
- def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
- "# dynamic stack allocation",
- [(X86WinAlloca)]>;
// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
@@ -132,6 +120,27 @@ def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
Requires<[In64BitMode]>;
}
+// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
+// targets. These calls are needed to probe the stack when allocating more than
+// 4k bytes in one go. Touching the stack at 4K increments is necessary to
+// ensure that the guard pages used by the OS virtual memory manager are
+// allocated in correct sequence.
+// The main point of having a separate instruction is the extra unmodelled effects
+// (compared to ordinary calls) like stack pointer change.
+
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
+def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
+ "# dynamic stack allocation",
+ [(X86WinAlloca GR32:$size)]>,
+ Requires<[NotLP64]>;
+
+let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
+def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
+ "# dynamic stack allocation",
+ [(X86WinAlloca GR64:$size)]>,
+ Requires<[In64BitMode]>;
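+// For scale: a single 12K allocation must touch the stack at SP-4K and SP-8K
+// before the final pointer adjustment; conceptually (illustrative pseudocode,
+// not the exact emitted sequence):
+//   for (Off = 4096; Off < Size; Off += 4096)
+//     probe byte at [SP - Off];  // commits the next guard page in order
+//   SP -= Size;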
+
+
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
@@ -250,7 +259,7 @@ def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
- isPseudo = 1 in
+ isPseudo = 1, AddedComplexity = 20 in
def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
[(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
@@ -263,7 +272,7 @@ def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
}
let Predicates = [OptForSize, NotSlowIncDec, Not64BitMode],
- AddedComplexity = 1 in {
+ AddedComplexity = 15 in {
// Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
// which only require 3 bytes compared to MOV32ri which requires 5.
let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
@@ -278,6 +287,17 @@ let Predicates = [OptForSize, NotSlowIncDec, Not64BitMode],
def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}
+let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 10 in {
+// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
+// FIXME: Add itinerary class and Schedule.
+def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
+ [(set GR32:$dst, i32immSExt8:$src)]>,
+ Requires<[OptForMinSize, NotWin64WithoutFP]>;
+def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
+ [(set GR64:$dst, i64immSExt8:$src)]>,
+ Requires<[OptForMinSize, NotWin64WithoutFP]>;
+}
+
// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
@@ -479,10 +499,13 @@ def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
[(X86TLSCall addr:$sym)]>,
Requires<[Not64BitMode]>;
-// For x86_64, the address of the thunk is passed in %rdi, on return
-// the address of the variable is in %rax. All other registers are preserved.
+// For x86_64, the address of the thunk is passed in %rdi, but the
+// pseudo directly uses the symbol, so do not add an implicit use of
+// %rdi. The lowering will do the right thing with RDI.
+// On return the address of the variable is in %rax. All other
+// registers are preserved.
let Defs = [RAX, EFLAGS],
- Uses = [RSP, RDI],
+ Uses = [RSP],
usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
"# TLSCall_64",
@@ -568,7 +591,7 @@ def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
- Format ImmMod, string mnemonic> {
+ Format ImmMod, SDPatternOperator Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
SchedRW = [WriteALULd, WriteRMW] in {
@@ -577,106 +600,124 @@ def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
!strconcat(mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, GR8:$src2))],
+ IIC_ALU_NONMEM>, LOCK;
+
def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, OpSize16, LOCK;
+ [(set EFLAGS, (Op addr:$dst, GR16:$src2))],
+ IIC_ALU_NONMEM>, OpSize16, LOCK;
+
def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, OpSize32, LOCK;
+ [(set EFLAGS, (Op addr:$dst, GR32:$src2))],
+ IIC_ALU_NONMEM>, OpSize32, LOCK;
+
def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
!strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, GR64:$src2))],
+ IIC_ALU_NONMEM>, LOCK;
def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
!strconcat(mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))],
+ IIC_ALU_MEM>, LOCK;
def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize16, LOCK;
+ [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))],
+ IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize32, LOCK;
+ [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))],
+ IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
!strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))],
+ IIC_ALU_MEM>, LOCK;
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize16, LOCK;
+ [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))],
+ IIC_ALU_MEM>, OpSize16, LOCK;
+
def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize32, LOCK;
+ [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))],
+ IIC_ALU_MEM>, OpSize32, LOCK;
+
def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
!strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))],
+ IIC_ALU_MEM>, LOCK;
}
}
-defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
-defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
-defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
-defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
-defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
+defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
+defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
+defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
+defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
+defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;
-// Optimized codegen when the non-memory output is not used.
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
- string mnemonic> {
+ int Increment, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
- SchedRW = [WriteALULd, WriteRMW] in {
-
+ SchedRW = [WriteALULd, WriteRMW], Predicates = [NotSlowIncDec] in {
def NAME#8m : I<Opc8, Form, (outs), (ins i8mem :$dst),
!strconcat(mnemonic, "{b}\t$dst"),
- [], IIC_UNARY_MEM>, LOCK;
+ [(set EFLAGS, (X86lock_add addr:$dst, (i8 Increment)))],
+ IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
!strconcat(mnemonic, "{w}\t$dst"),
- [], IIC_UNARY_MEM>, OpSize16, LOCK;
+ [(set EFLAGS, (X86lock_add addr:$dst, (i16 Increment)))],
+ IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
!strconcat(mnemonic, "{l}\t$dst"),
- [], IIC_UNARY_MEM>, OpSize32, LOCK;
+ [(set EFLAGS, (X86lock_add addr:$dst, (i32 Increment)))],
+ IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
!strconcat(mnemonic, "{q}\t$dst"),
- [], IIC_UNARY_MEM>, LOCK;
+ [(set EFLAGS, (X86lock_add addr:$dst, (i64 Increment)))],
+ IIC_UNARY_MEM>, LOCK;
}
}
-defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
-defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;
+defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, 1, "inc">;
+defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, -1, "dec">;
// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
@@ -719,6 +760,38 @@ defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
IIC_CMPX_LOCK_8B>;
}
+// This pseudo must be used when the frame uses RBX as
+// the base pointer. Indeed, in such a situation RBX is a reserved
+// register and the register allocator will ignore any use/def of
+// it. In other words, the register allocator will not fix the clobbering
+// of RBX that will happen when setting the arguments for the instruction.
+//
+// Unlike the actual related instruction, we mark that this one
+// defines EBX (instead of using EBX).
+// The rationale is that we will define RBX during the expansion of
+// the pseudo. The argument feeding EBX is ebx_input.
+//
+// The additional argument, $ebx_save, is a temporary register used to
+// save the value of RBX across the actual instruction.
+//
+// To make sure the register assigned to $ebx_save does not interfere with
+// the definition of the actual instruction, we use a definition $dst which
+// is tied to $ebx_save. That way, the live-range of $ebx_save spans across
+// the instruction and we are sure we will have a valid register to restore
+// the value of RBX.
+let Defs = [EAX, EDX, EBX, EFLAGS], Uses = [EAX, ECX, EDX],
+ SchedRW = [WriteALULd, WriteRMW], isCodeGenOnly = 1, isPseudo = 1,
+ Constraints = "$ebx_save = $dst", usesCustomInserter = 1 in {
+def LCMPXCHG8B_SAVE_EBX :
+ I<0, Pseudo, (outs GR32:$dst),
+ (ins i64mem:$ptr, GR32:$ebx_input, GR32:$ebx_save),
+ !strconcat("cmpxchg8b", "\t$ptr"),
+ [(set GR32:$dst, (X86cas8save_ebx addr:$ptr, GR32:$ebx_input,
+ GR32:$ebx_save))],
+ IIC_CMPX_LOCK_8B>;
+}
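+
+// In effect the expansion saves RBX into the register tied to $dst, feeds
+// $ebx_input into EBX, issues the real cmpxchg8b, and restores RBX from
+// $ebx_save afterwards (a sketch of the intent, not the exact sequence).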
+
+
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
@@ -726,6 +799,20 @@ defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
IIC_CMPX_LOCK_16B>, REX_W;
}
+// Same as LCMPXCHG8B_SAVE_EBX, but for the 16-byte variant.
+let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
+ Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW],
+ isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst",
+ usesCustomInserter = 1 in {
+def LCMPXCHG16B_SAVE_RBX :
+ I<0, Pseudo, (outs GR64:$dst),
+ (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save),
+ !strconcat("cmpxchg16b", "\t$ptr"),
+ [(set GR64:$dst, (X86cas16save_rbx addr:$ptr, GR64:$rbx_input,
+ GR64:$rbx_save))],
+ IIC_CMPX_LOCK_16B>;
+}
+
defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;
@@ -926,6 +1013,18 @@ def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//
+// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
+// binary size compared to a regular MOV, but it introduces an unnecessary
+// load, so is not suitable for regular or optsize functions.
+let Predicates = [OptForMinSize] in {
+def : Pat<(store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
+def : Pat<(store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
+def : Pat<(store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
+def : Pat<(store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
+def : Pat<(store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
+def : Pat<(store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
+}
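+// For scale (worked from the encodings): "mov dword ptr [mem], 0" is
+// C7 /0 id (opcode + ModRM + imm32 = 6 bytes before addressing bytes), while
+// "and dword ptr [mem], 0" is 83 /4 ib (3 bytes), so each rewritten store
+// saves 3 bytes.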
+
// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;
@@ -994,22 +1093,22 @@ def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tconstpool:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tjumptable:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tglobaladdr:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
(MOV64mi32 addr:$dst, texternalsym:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
(MOV64mi32 addr:$dst, mcsym:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tblockaddress:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;
@@ -1139,12 +1238,13 @@ defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;
// zextload bool -> zextload byte
-def : Pat<(zextloadi8i1 addr:$src), (AND8ri (MOV8rm addr:$src), (i8 1))>;
-def : Pat<(zextloadi16i1 addr:$src), (AND16ri8 (MOVZX16rm8 addr:$src), (i16 1))>;
-def : Pat<(zextloadi32i1 addr:$src), (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1))>;
+// An i1 is stored in one byte in zero-extended form; the upper bits are
+// already cleared before the store, so the load needs no masking.
+def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
+def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
+def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
- (SUBREG_TO_REG (i64 0),
- (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), sub_32bit)>;
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
@@ -1305,7 +1405,7 @@ def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
(SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
-def : Pat<(store (add (loadi64 addr:$dst), 0x00000000800000000), addr:$dst),
+def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
(SUB64mi32 addr:$dst, 0xffffffff80000000)>;
// To avoid needing to materialize an immediate in a register, use a 32-bit and
@@ -1450,6 +1550,10 @@ def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
sub_8bit_hi)>,
Requires<[Not64BitMode]>;
+def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)>,
+ Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
(EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
sub_8bit_hi)>,
diff --git a/lib/Target/X86/X86InstrControl.td b/lib/Target/X86/X86InstrControl.td
index 8c351a51c460e..bb5f9117f032f 100644
--- a/lib/Target/X86/X86InstrControl.td
+++ b/lib/Target/X86/X86InstrControl.td
@@ -22,21 +22,21 @@
let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, FPForm = SpecialFP, SchedRW = [WriteJumpLd] in {
def RETL : I <0xC3, RawFrm, (outs), (ins variable_ops),
- "ret{l}", [(X86retflag 0)], IIC_RET>, OpSize32,
+ "ret{l}", [], IIC_RET>, OpSize32,
Requires<[Not64BitMode]>;
def RETQ : I <0xC3, RawFrm, (outs), (ins variable_ops),
- "ret{q}", [(X86retflag 0)], IIC_RET>, OpSize32,
+ "ret{q}", [], IIC_RET>, OpSize32,
Requires<[In64BitMode]>;
def RETW : I <0xC3, RawFrm, (outs), (ins),
"ret{w}",
[], IIC_RET>, OpSize16;
def RETIL : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
"ret{l}\t$amt",
- [(X86retflag timm:$amt)], IIC_RET_IMM>, OpSize32,
+ [], IIC_RET_IMM>, OpSize32,
Requires<[Not64BitMode]>;
def RETIQ : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
"ret{q}\t$amt",
- [(X86retflag timm:$amt)], IIC_RET_IMM>, OpSize32,
+ [], IIC_RET_IMM>, OpSize32,
Requires<[In64BitMode]>;
def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
"ret{w}\t$amt",
@@ -64,8 +64,8 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1,
def IRET64 : RI <0xcf, RawFrm, (outs), (ins), "iretq", [],
IIC_IRET>, Requires<[In64BitMode]>;
let isCodeGenOnly = 1 in
- def IRET : PseudoI<(outs), (ins i16imm:$adj), [(X86iret timm:$adj)]>;
-
+ def IRET : PseudoI<(outs), (ins i32imm:$adj), [(X86iret timm:$adj)]>;
+ def RET : PseudoI<(outs), (ins i32imm:$adj, variable_ops),
+                   [(X86retflag timm:$adj)]>;
}
// Unconditional branches.
diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td
index 03ae21125b0e8..078dab41502ac 100644
--- a/lib/Target/X86/X86InstrFPStack.td
+++ b/lib/Target/X86/X86InstrFPStack.td
@@ -326,7 +326,7 @@ def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom{s}\t$src">;
def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp{s}\t$src">;
def FLDENVm : FPI<0xD9, MRM4m, (outs), (ins f32mem:$src), "fldenv\t$src">;
-def FSTENVm : FPI<0xD9, MRM6m, (outs f32mem:$dst), (ins), "fnstenv\t$dst">;
+def FSTENVm : FPI<0xD9, MRM6m, (outs), (ins f32mem:$dst), "fnstenv\t$dst">;
def FICOM32m : FPI<0xDA, MRM2m, (outs), (ins i32mem:$src), "ficom{l}\t$src">;
def FICOMP32m: FPI<0xDA, MRM3m, (outs), (ins i32mem:$src), "ficomp{l}\t$src">;
@@ -334,15 +334,15 @@ def FICOMP32m: FPI<0xDA, MRM3m, (outs), (ins i32mem:$src), "ficomp{l}\t$src">;
def FCOM64m : FPI<0xDC, MRM2m, (outs), (ins f64mem:$src), "fcom{l}\t$src">;
def FCOMP64m : FPI<0xDC, MRM3m, (outs), (ins f64mem:$src), "fcomp{l}\t$src">;
-def FRSTORm : FPI<0xDD, MRM4m, (outs f32mem:$dst), (ins), "frstor\t$dst">;
-def FSAVEm : FPI<0xDD, MRM6m, (outs f32mem:$dst), (ins), "fnsave\t$dst">;
-def FNSTSWm : FPI<0xDD, MRM7m, (outs i16mem:$dst), (ins), "fnstsw\t$dst">;
+def FRSTORm : FPI<0xDD, MRM4m, (outs), (ins f32mem:$dst), "frstor\t$dst">;
+def FSAVEm : FPI<0xDD, MRM6m, (outs), (ins f32mem:$dst), "fnsave\t$dst">;
+def FNSTSWm : FPI<0xDD, MRM7m, (outs), (ins i16mem:$dst), "fnstsw\t$dst">;
def FICOM16m : FPI<0xDE, MRM2m, (outs), (ins i16mem:$src), "ficom{s}\t$src">;
def FICOMP16m: FPI<0xDE, MRM3m, (outs), (ins i16mem:$src), "ficomp{s}\t$src">;
def FBLDm : FPI<0xDF, MRM4m, (outs), (ins f80mem:$src), "fbld\t$src">;
-def FBSTPm : FPI<0xDF, MRM6m, (outs f80mem:$dst), (ins), "fbstp\t$dst">;
+def FBSTPm : FPI<0xDF, MRM6m, (outs), (ins f80mem:$dst), "fbstp\t$dst">;
// Floating point cmovs.
class FpIf32CMov<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td
index e2fa295c0230a..5183adc834b1e 100644
--- a/lib/Target/X86/X86InstrFormats.td
+++ b/lib/Target/X86/X86InstrFormats.td
@@ -845,7 +845,7 @@ class AVXPCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, T8PD,
- VEX_4V, FMASC, Requires<[HasFMA]>;
+ VEX_4V, FMASC, Requires<[HasFMA, NoVLX]>;
// FMA4 Instruction Templates
class FMA4<bits<8> o, Format F, dag outs, dag ins, string asm,
diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index 643286324e250..ea54f049ec7a2 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -35,7 +35,7 @@ def bc_mmx : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//
-def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
+def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisSameAs<1, 2>,
SDTCisFP<1>, SDTCisVT<3, i8>,
SDTCisVec<1>]>;
def SDTX86CmpTestSae : SDTypeProfile<1, 3, [SDTCisVT<0, i32>,
@@ -60,9 +60,8 @@ def X86fandn : SDNode<"X86ISD::FANDN", SDTFPBinOp,
[SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
-def X86frsqrt14s: SDNode<"X86ISD::FRSQRT", SDTFPBinOp>;
-def X86frcp14s : SDNode<"X86ISD::FRCP", SDTFPBinOp>;
-def X86fgetsign: SDNode<"X86ISD::FGETSIGNx86",SDTFPToIntOp>;
+def X86frsqrt14s: SDNode<"X86ISD::FRSQRTS", SDTFPBinOp>;
+def X86frcp14s : SDNode<"X86ISD::FRCPS", SDTFPBinOp>;
def X86fhadd : SDNode<"X86ISD::FHADD", SDTFPBinOp>;
def X86fhsub : SDNode<"X86ISD::FHSUB", SDTFPBinOp>;
def X86hadd : SDNode<"X86ISD::HADD", SDTIntBinOp>;
@@ -72,7 +71,6 @@ def X86comiSae : SDNode<"X86ISD::COMI", SDTX86CmpTestSae>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86ucomiSae: SDNode<"X86ISD::UCOMI", SDTX86CmpTestSae>;
def X86cmps : SDNode<"X86ISD::FSETCC", SDTX86Cmps>;
-//def X86cmpsd : SDNode<"X86ISD::FSETCCsd", SDTX86Cmpsd>;
def X86cvtdq2pd: SDNode<"X86ISD::CVTDQ2PD",
SDTypeProfile<1, 1, [SDTCisVT<0, v2f64>,
SDTCisVT<1, v4i32>]>>;
@@ -95,9 +93,9 @@ def X86dbpsadbw : SDNode<"X86ISD::DBPSADBW",
def X86andnp : SDNode<"X86ISD::ANDNP",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>>;
-def X86psign : SDNode<"X86ISD::PSIGN",
- SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>]>>;
+def X86multishift : SDNode<"X86ISD::MULTISHIFT",
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisSameAs<1,2>]>>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisVT<1, v16i8>,
SDTCisPtrTy<2>]>>;
@@ -137,46 +135,39 @@ def X86vtrunc : SDNode<"X86ISD::VTRUNC", SDTVtrunc>;
def X86vtruncs : SDNode<"X86ISD::VTRUNCS", SDTVtrunc>;
def X86vtruncus : SDNode<"X86ISD::VTRUNCUS", SDTVtrunc>;
-def X86trunc : SDNode<"X86ISD::TRUNC",
- SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>,
- SDTCisOpSmallerThanOp<0, 1>]>>;
def X86vfpext : SDNode<"X86ISD::VFPEXT",
- SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>,
- SDTCisOpSmallerThanOp<1, 0>]>>;
+ SDTypeProfile<1, 1, [SDTCVecEltisVT<0, f64>,
+ SDTCVecEltisVT<1, f32>,
+ SDTCisSameSizeAs<0, 1>]>>;
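+// e.g. (v2f64 (X86vfpext (v4f32 VR128:$src))): source and result are both
+// 128 bits wide and only the low two f32 elements are converted.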
def X86vfpround: SDNode<"X86ISD::VFPROUND",
- SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>,
- SDTCisOpSmallerThanOp<0, 1>]>>;
+ SDTypeProfile<1, 1, [SDTCVecEltisVT<0, f32>,
+ SDTCVecEltisVT<1, f64>,
+ SDTCisSameSizeAs<0, 1>]>>;
def X86fround: SDNode<"X86ISD::VFPROUND",
- SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisFP<1>,SDTCisFP<2>,
- SDTCVecEltisVT<0, f32>,
- SDTCVecEltisVT<1, f64>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f32>,
+ SDTCisSameAs<0, 1>,
SDTCVecEltisVT<2, f64>,
- SDTCisOpSmallerThanOp<0, 1>]>>;
+ SDTCisSameSizeAs<0, 2>]>>;
def X86froundRnd: SDNode<"X86ISD::VFPROUND",
- SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisFP<1>,SDTCisFP<2>,
- SDTCVecEltisVT<0, f32>,
- SDTCVecEltisVT<1, f64>,
+ SDTypeProfile<1, 3, [SDTCVecEltisVT<0, f32>,
+ SDTCisSameAs<0, 1>,
SDTCVecEltisVT<2, f64>,
- SDTCisOpSmallerThanOp<0, 1>,
- SDTCisInt<3>]>>;
+ SDTCisSameSizeAs<0, 2>,
+ SDTCisVT<3, i32>]>>;
def X86fpext : SDNode<"X86ISD::VFPEXT",
- SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisFP<1>,SDTCisFP<2>,
- SDTCVecEltisVT<0, f64>,
- SDTCVecEltisVT<1, f32>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f64>,
+ SDTCisSameAs<0, 1>,
SDTCVecEltisVT<2, f32>,
- SDTCisOpSmallerThanOp<1, 0>]>>;
+ SDTCisSameSizeAs<0, 2>]>>;
def X86fpextRnd : SDNode<"X86ISD::VFPEXT",
- SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisFP<1>,SDTCisFP<2>,
- SDTCVecEltisVT<0, f64>,
- SDTCVecEltisVT<1, f32>,
+ SDTypeProfile<1, 3, [SDTCVecEltisVT<0, f64>,
+ SDTCisSameAs<0, 1>,
SDTCVecEltisVT<2, f32>,
- SDTCisOpSmallerThanOp<1, 0>,
- SDTCisInt<3>]>>;
+ SDTCisSameSizeAs<0, 2>,
+ SDTCisVT<3, i32>]>>;
def X86vshldq : SDNode<"X86ISD::VSHLDQ", SDTIntShiftOp>;
def X86vshrdq : SDNode<"X86ISD::VSRLDQ", SDTIntShiftOp>;
@@ -221,6 +212,8 @@ def X86vsra : SDNode<"X86ISD::VSRA",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisVec<2>]>>;
+def X86vsrav : SDNode<"X86ISD::VSRAV" , SDTIntShiftOp>;
+
def X86vshli : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;
@@ -250,10 +243,24 @@ def X86vpcomu : SDNode<"X86ISD::VPCOMU",
SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>,
SDTCisVT<3, i8>]>>;
+def X86vpermil2 : SDNode<"X86ISD::VPERMIL2",
+ SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>,
+ SDTCisSameSizeAs<0,3>,
+ SDTCisSameNumEltsAs<0, 3>,
+ SDTCisVT<4, i8>]>>;
+def X86vpperm : SDNode<"X86ISD::VPPERM",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>>;
def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
SDTCisVec<1>,
SDTCisSameAs<2, 1>]>;
+
+def SDTX86Testm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisSameAs<2, 1>, SDTCVecEltisVT<0, i1>,
+ SDTCisSameNumEltsAs<0, 1>]>;
+
def X86addus : SDNode<"X86ISD::ADDUS", SDTIntBinOp>;
def X86subus : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86adds : SDNode<"X86ISD::ADDS", SDTIntBinOp>;
@@ -264,15 +271,22 @@ def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
def X86ktest : SDNode<"X86ISD::KTEST", SDTX86CmpPTest>;
-def X86testm : SDNode<"X86ISD::TESTM", SDTypeProfile<1, 2, [SDTCisVec<0>,
- SDTCisVec<1>, SDTCisSameAs<2, 1>,
- SDTCVecEltisVT<0, i1>,
- SDTCisSameNumEltsAs<0, 1>]>>;
-def X86testnm : SDNode<"X86ISD::TESTNM", SDTypeProfile<1, 2, [SDTCisVec<0>,
- SDTCisVec<1>, SDTCisSameAs<2, 1>,
- SDTCVecEltisVT<0, i1>,
- SDTCisSameNumEltsAs<0, 1>]>>;
-def X86select : SDNode<"X86ISD::SELECT" , SDTSelect>;
+def X86testm : SDNode<"X86ISD::TESTM", SDTX86Testm, [SDNPCommutative]>;
+def X86testnm : SDNode<"X86ISD::TESTNM", SDTX86Testm, [SDNPCommutative]>;
+
+def X86movmsk : SDNode<"X86ISD::MOVMSK",
+ SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVec<1>]>>;
+
+def X86select : SDNode<"X86ISD::SELECT",
+ SDTypeProfile<1, 3, [SDTCVecEltisVT<1, i1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisSameAs<2, 3>,
+ SDTCisSameNumEltsAs<0, 1>]>>;
+
+def X86selects : SDNode<"X86ISD::SELECT",
+ SDTypeProfile<1, 3, [SDTCisVT<1, i1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisSameAs<2, 3>]>>;
def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
SDTypeProfile<1, 2, [SDTCVecEltisVT<0, i64>,
@@ -308,9 +322,16 @@ def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>, SDTCisVT<3, i8>]>;
def SDTFPBinOpImmRound: SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>, SDTCisInt<3>, SDTCisInt<4>]>;
+ SDTCisSameAs<0,2>, SDTCisVT<3, i32>, SDTCisVT<4, i32>]>;
+def SDTFPTernaryOpImmRound: SDTypeProfile<1, 5, [SDTCisFP<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>,
+ SDTCisInt<3>,
+ SDTCisSameSizeAs<0, 3>,
+ SDTCisSameNumEltsAs<0, 3>,
+ SDTCisVT<4, i32>,
+ SDTCisVT<5, i32>]>;
def SDTFPUnaryOpImmRound: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
- SDTCisInt<2>, SDTCisInt<3>]>;
+ SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDTVBroadcastm : SDTypeProfile<1, 1, [SDTCisVec<0>,
@@ -324,21 +345,16 @@ def SDTTernlog : SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisVT<4, i8>]>;
def SDTFPBinOpRound : SDTypeProfile<1, 3, [ // fadd_round, fmul_round, etc.
- SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>, SDTCisInt<3>]>;
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>, SDTCisVT<3, i32>]>;
def SDTFPUnaryOpRound : SDTypeProfile<1, 2, [ // fsqrt_round, fgetexp_round, etc.
- SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>]>;
+ SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisVT<2, i32>]>;
def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>;
def SDTFmaRound : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
- SDTCisSameAs<1,2>, SDTCisSameAs<1,3>, SDTCisInt<4>]>;
-def STDFp1SrcRm : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>,
- SDTCisVec<0>, SDTCisVT<2, i32>]>;
-def STDFp2SrcRm : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
- SDTCisVec<0>, SDTCisVT<3, i32>]>;
-def STDFp3SrcRm : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
- SDTCisVec<0>, SDTCisVT<3, i32>, SDTCisVT<4, i32>]>;
+ SDTCisSameAs<1,2>, SDTCisSameAs<1,3>,
+ SDTCisVT<4, i32>]>;
def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>;
def X86VAlign : SDNode<"X86ISD::VALIGN", SDTShuff3OpI>;
@@ -405,7 +421,8 @@ def X86vpternlog : SDNode<"X86ISD::VPTERNLOG", SDTTernlog>;
def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;
-def X86VFixupimm : SDNode<"X86ISD::VFIXUPIMM", SDTFPBinOpImmRound>;
+def X86VFixupimm : SDNode<"X86ISD::VFIXUPIMM", SDTFPTernaryOpImmRound>;
+def X86VFixupimmScalar : SDNode<"X86ISD::VFIXUPIMMS", SDTFPTernaryOpImmRound>;
def X86VRange : SDNode<"X86ISD::VRANGE", SDTFPBinOpImmRound>;
def X86VReduce : SDNode<"X86ISD::VREDUCE", SDTFPUnaryOpImmRound>;
def X86VRndScale : SDNode<"X86ISD::VRNDSCALE", SDTFPUnaryOpImmRound>;
@@ -422,10 +439,6 @@ def X86Vfpclasss : SDNode<"X86ISD::VFPCLASSS",
def X86SubVBroadcast : SDNode<"X86ISD::SUBV_BROADCAST",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
SDTCisSubVecOfVec<1, 0>]>, []>;
-// SDTCisSubVecOfVec restriction cannot be applied for 128 bit version of VBROADCASTI32x2.
-def X86SubV32x2Broadcast : SDNode<"X86ISD::SUBV_BROADCAST",
- SDTypeProfile<1, 1, [SDTCisVec<0>,
- SDTCisSameAs<0,1>]>, []>;
def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;
def X86VBroadcastm : SDNode<"X86ISD::VBROADCASTM", SDTVBroadcastm>;
@@ -446,11 +459,12 @@ def X86fmulRnd : SDNode<"X86ISD::FMUL_RND", SDTFPBinOpRound>;
def X86fdivRnd : SDNode<"X86ISD::FDIV_RND", SDTFPBinOpRound>;
def X86fmaxRnd : SDNode<"X86ISD::FMAX_RND", SDTFPBinOpRound>;
def X86scalef : SDNode<"X86ISD::SCALEF", SDTFPBinOpRound>;
+def X86scalefs : SDNode<"X86ISD::SCALEFS", SDTFPBinOpRound>;
def X86fminRnd : SDNode<"X86ISD::FMIN_RND", SDTFPBinOpRound>;
def X86fsqrtRnd : SDNode<"X86ISD::FSQRT_RND", SDTFPUnaryOpRound>;
-def X86fsqrtRnds : SDNode<"X86ISD::FSQRT_RND", STDFp2SrcRm>;
+def X86fsqrtRnds : SDNode<"X86ISD::FSQRT_RND", SDTFPBinOpRound>;
def X86fgetexpRnd : SDNode<"X86ISD::FGETEXP_RND", SDTFPUnaryOpRound>;
-def X86fgetexpRnds : SDNode<"X86ISD::FGETEXP_RND", STDFp2SrcRm>;
+def X86fgetexpRnds : SDNode<"X86ISD::FGETEXP_RND", SDTFPBinOpRound>;
def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>;
def X86Fnmadd : SDNode<"X86ISD::FNMADD", SDTFma>;
@@ -466,15 +480,18 @@ def X86FnmsubRnd : SDNode<"X86ISD::FNMSUB_RND", SDTFmaRound>;
def X86FmaddsubRnd : SDNode<"X86ISD::FMADDSUB_RND", SDTFmaRound>;
def X86FmsubaddRnd : SDNode<"X86ISD::FMSUBADD_RND", SDTFmaRound>;
-def X86rsqrt28 : SDNode<"X86ISD::RSQRT28", STDFp1SrcRm>;
-def X86rcp28 : SDNode<"X86ISD::RCP28", STDFp1SrcRm>;
-def X86exp2 : SDNode<"X86ISD::EXP2", STDFp1SrcRm>;
+def x86vpmadd52l : SDNode<"X86ISD::VPMADD52L", SDTFma>;
+def x86vpmadd52h : SDNode<"X86ISD::VPMADD52H", SDTFma>;
-def X86rsqrt28s : SDNode<"X86ISD::RSQRT28", STDFp2SrcRm>;
-def X86rcp28s : SDNode<"X86ISD::RCP28", STDFp2SrcRm>;
-def X86RndScales : SDNode<"X86ISD::VRNDSCALE", STDFp3SrcRm>;
-def X86Reduces : SDNode<"X86ISD::VREDUCE", STDFp3SrcRm>;
-def X86GetMants : SDNode<"X86ISD::VGETMANT", STDFp3SrcRm>;
+def X86rsqrt28 : SDNode<"X86ISD::RSQRT28", SDTFPUnaryOpRound>;
+def X86rcp28 : SDNode<"X86ISD::RCP28", SDTFPUnaryOpRound>;
+def X86exp2 : SDNode<"X86ISD::EXP2", SDTFPUnaryOpRound>;
+
+def X86rsqrt28s : SDNode<"X86ISD::RSQRT28", SDTFPBinOpRound>;
+def X86rcp28s : SDNode<"X86ISD::RCP28", SDTFPBinOpRound>;
+def X86RndScales : SDNode<"X86ISD::VRNDSCALE", SDTFPBinOpImmRound>;
+def X86Reduces : SDNode<"X86ISD::VREDUCE", SDTFPBinOpImmRound>;
+def X86GetMants : SDNode<"X86ISD::VGETMANT", SDTFPBinOpImmRound>;
def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
@@ -496,90 +513,62 @@ def SDTintToFPRound: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
SDTCisSameAs<0,1>, SDTCisInt<2>,
SDTCisVT<3, i32>]>;
-def SDTDoubleToInt: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCVecEltisVT<1, f64>]>;
def SDTFloatToInt: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCVecEltisVT<1, f32>]>;
+ SDTCisInt<0>, SDTCisFP<1>]>;
-def SDTDoubleToIntRnd: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCVecEltisVT<1, f64>]>;
-def SDTSDoubleToIntRnd: SDTypeProfile<1, 2, [SDTCisInt<0>,SDTCisFP<1>,
- SDTCVecEltisVT<1, f64>, SDTCisInt<2>]>;
def SDTFloatToIntRnd: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCVecEltisVT<1, f32>]>;
+ SDTCisInt<0>, SDTCisFP<1>,
+ SDTCisVT<2, i32>]>;
def SDTSFloatToIntRnd: SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisFP<1>,
- SDTCVecEltisVT<1, f32>, SDTCisInt<2>]>;
+ SDTCisVec<1>, SDTCisVT<2, i32>]>;
def SDTVintToFPRound: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCVecEltisVT<1, i32>,
- SDTCisInt<2>]>;
-def SDTVlongToFPRound: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCVecEltisVT<1, i64>,
- SDTCisInt<2>]>;
-
-def SDTVFPToIntRound: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<1>, SDTCVecEltisVT<0, i32>,
- SDTCisInt<2>]>;
-def SDTVFPToLongRound: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<1>, SDTCVecEltisVT<0, i64>,
- SDTCisInt<2>]>;
+ SDTCisFP<0>, SDTCisInt<1>,
+ SDTCisVT<2, i32>]>;
// Scalar
def X86SintToFpRnd : SDNode<"X86ISD::SINT_TO_FP_RND", SDTintToFPRound>;
def X86UintToFpRnd : SDNode<"X86ISD::UINT_TO_FP_RND", SDTintToFPRound>;
-def X86cvttss2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTSFloatToIntRnd>;
-def X86cvttss2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTSFloatToIntRnd>;
-def X86cvttsd2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTSDoubleToIntRnd>;
-def X86cvttsd2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTSDoubleToIntRnd>;
+def X86cvtts2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTSFloatToIntRnd>;
+def X86cvtts2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTSFloatToIntRnd>;
+
+def X86cvts2si : SDNode<"X86ISD::SCALAR_FP_TO_SINT_RND", SDTSFloatToIntRnd>;
+def X86cvts2usi : SDNode<"X86ISD::SCALAR_FP_TO_UINT_RND", SDTSFloatToIntRnd>;
+
// Vector with rounding mode
// cvtt fp-to-int stuff
-def X86VFpToSintRnd : SDNode<"ISD::FP_TO_SINT", SDTVFPToIntRound>;
-def X86VFpToUintRnd : SDNode<"ISD::FP_TO_UINT", SDTVFPToIntRound>;
-def X86VFpToSlongRnd : SDNode<"ISD::FP_TO_SINT", SDTVFPToLongRound>;
-def X86VFpToUlongRnd : SDNode<"ISD::FP_TO_UINT", SDTVFPToLongRound>;
+def X86VFpToSintRnd : SDNode<"ISD::FP_TO_SINT", SDTFloatToIntRnd>;
+def X86VFpToUintRnd : SDNode<"ISD::FP_TO_UINT", SDTFloatToIntRnd>;
def X86VSintToFpRnd : SDNode<"ISD::SINT_TO_FP", SDTVintToFPRound>;
def X86VUintToFpRnd : SDNode<"ISD::UINT_TO_FP", SDTVintToFPRound>;
-def X86VSlongToFpRnd : SDNode<"ISD::SINT_TO_FP", SDTVlongToFPRound>;
-def X86VUlongToFpRnd : SDNode<"ISD::UINT_TO_FP", SDTVlongToFPRound>;
// cvt fp-to-int stuff
-def X86cvtps2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToIntRnd>;
-def X86cvtps2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToIntRnd>;
-def X86cvtpd2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTDoubleToIntRnd>;
-def X86cvtpd2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTDoubleToIntRnd>;
+def X86cvtp2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToIntRnd>;
+def X86cvtp2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToIntRnd>;
// Vector without rounding mode
-def X86cvtps2Int : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToInt>;
-def X86cvtps2UInt : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToInt>;
-def X86cvtpd2Int : SDNode<"X86ISD::FP_TO_SINT_RND", SDTDoubleToInt>;
-def X86cvtpd2UInt : SDNode<"X86ISD::FP_TO_UINT_RND", SDTDoubleToInt>;
+def X86cvtp2Int : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToInt>;
+def X86cvtp2UInt : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToInt>;
def X86cvtph2ps : SDNode<"ISD::FP16_TO_FP",
- SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCVecEltisVT<0, f32>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f32>,
SDTCVecEltisVT<1, i16>,
- SDTCisFP<0>,
SDTCisVT<2, i32>]> >;
def X86cvtps2ph : SDNode<"ISD::FP_TO_FP16",
- SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCVecEltisVT<0, i16>,
+ SDTypeProfile<1, 3, [SDTCVecEltisVT<0, i16>,
SDTCVecEltisVT<1, f32>,
- SDTCisFP<1>, SDTCisVT<2, i32>,
+ SDTCisVT<2, i32>,
SDTCisVT<3, i32>]> >;
def X86vfpextRnd : SDNode<"X86ISD::VFPEXT",
- SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>,
- SDTCVecEltisVT<0, f64>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f64>,
SDTCVecEltisVT<1, f32>,
SDTCisOpSmallerThanOp<1, 0>,
SDTCisVT<2, i32>]>>;
def X86vfproundRnd: SDNode<"X86ISD::VFPROUND",
- SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>,
- SDTCVecEltisVT<0, f32>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f32>,
SDTCVecEltisVT<1, f64>,
SDTCisOpSmallerThanOp<0, 1>,
SDTCisVT<2, i32>]>>;
@@ -602,13 +591,13 @@ def sse_load_f64 : ComplexPattern<v2f64, 5, "selectScalarSSELoad", [],
def ssmem : Operand<v4f32> {
let PrintMethod = "printf32mem";
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem32AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
def sdmem : Operand<v2f64> {
let PrintMethod = "printf64mem";
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem64AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -674,11 +663,6 @@ def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
-// Like 'X86vzload', but always requires 128-bit vector alignment.
-def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
- return cast<MemSDNode>(N)->getAlignment() >= 16;
-}]>;
-
// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 32;
@@ -982,9 +966,9 @@ def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
return isa<MaskedLoadSDNode>(N);
}]>;
-// masked store fragments.
+// Masked store fragments.
// X86mstore can't be implemented in core DAG files because some targets
-// doesn't support vector type ( llvm-tblgen will fail)
+// do not support vector types (llvm-tblgen will fail).
def X86mstore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
return !cast<MaskedStoreSDNode>(N)->isTruncatingStore();
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 246804e34289a..1672b3855b798 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -18,11 +18,13 @@
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
@@ -36,7 +38,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
-#include <limits>
using namespace llvm;
@@ -57,6 +58,17 @@ static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
cl::desc("Re-materialize load from stub in PIC mode"),
cl::init(false), cl::Hidden);
+static cl::opt<unsigned>
+PartialRegUpdateClearance("partial-reg-update-clearance",
+ cl::desc("Clearance between two register writes "
+ "for inserting XOR to avoid partial "
+ "register update"),
+ cl::init(64), cl::Hidden);
+static cl::opt<unsigned>
+UndefRegClearance("undef-reg-clearance",
+ cl::desc("How many idle instructions we would like before "
+ "certain undef register reads"),
+ cl::init(64), cl::Hidden);
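+// Both clearances are exposed as llc command-line flags named after the
+// cl::opt declarations above, e.g.:
+//   llc -partial-reg-update-clearance=32 -undef-reg-clearance=32 foo.ll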
enum {
// Select which memory operand is being unfolded.
@@ -105,7 +117,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
: X86::ADJCALLSTACKDOWN32),
(STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
: X86::ADJCALLSTACKUP32),
- X86::CATCHRET),
+ X86::CATCHRET,
+ (STI.is64Bit() ? X86::RETQ : X86::RETL)),
Subtarget(STI), RI(STI.getTargetTriple()) {
static const X86MemoryFoldTableEntry MemoryFoldTable2Addr[] = {
@@ -804,50 +817,54 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::TZMSK64rr, X86::TZMSK64rm, 0 },
// AVX-512 foldable instructions
- { X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
- { X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
- { X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
- { X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
- { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
- { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
- { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
- { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
- { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
- { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
- { X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
- { X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
- { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
- { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
- { X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
- { X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },
+ { X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
+ { X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
+ { X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
+ { X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
+ { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
+ { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
+ { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
+ { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
+ { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
+ { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
+ { X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
+ { X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
+ { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
+ { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
+ { X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZr_s, X86::VBROADCASTSSZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZr_s, X86::VBROADCASTSDZm, TB_NO_REVERSE },
// AVX-512 foldable instructions (256-bit versions)
- { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
- { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
- { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
- { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
- { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
- { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
- { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
- { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
- { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
- { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
- { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
- { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
-
- // AVX-512 foldable instructions (256-bit versions)
- { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
- { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
- { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
- { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
- { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
- { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
- { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
- { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
- { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
- { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
- { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
+ { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
+ { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
+ { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
+ { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
+ { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
+ { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
+ { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
+ { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
+ { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
+ { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
+ { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZ256r_s, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZ256r_s, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
+ // AVX-512 foldable instructions (128-bit versions)
+ { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
+ { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
+ { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
+ { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
+ { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
+ { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
+ { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
+ { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
+ { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
+ { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
+ { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZ128r_s, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
// F16C foldable instructions
{ X86::VCVTPH2PSrr, X86::VCVTPH2PSrm, 0 },
{ X86::VCVTPH2PSYrr, X86::VCVTPH2PSYrm, 0 },
@@ -998,6 +1015,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::MINSDrr_Int, X86::MINSDrm_Int, 0 },
{ X86::MINSSrr, X86::MINSSrm, 0 },
{ X86::MINSSrr_Int, X86::MINSSrm_Int, 0 },
+ { X86::MOVLHPSrr, X86::MOVHPSrm, TB_NO_REVERSE },
{ X86::MPSADBWrri, X86::MPSADBWrmi, TB_ALIGN_16 },
{ X86::MULPDrr, X86::MULPDrm, TB_ALIGN_16 },
{ X86::MULPSrr, X86::MULPSrm, TB_ALIGN_16 },
@@ -1023,7 +1041,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::PADDUSBrr, X86::PADDUSBrm, TB_ALIGN_16 },
{ X86::PADDUSWrr, X86::PADDUSWrm, TB_ALIGN_16 },
{ X86::PADDWrr, X86::PADDWrm, TB_ALIGN_16 },
- { X86::PALIGNR128rr, X86::PALIGNR128rm, TB_ALIGN_16 },
+ { X86::PALIGNRrri, X86::PALIGNRrmi, TB_ALIGN_16 },
{ X86::PANDNrr, X86::PANDNrm, TB_ALIGN_16 },
{ X86::PANDrr, X86::PANDrm, TB_ALIGN_16 },
{ X86::PAVGBrr, X86::PAVGBrm, TB_ALIGN_16 },
@@ -1073,9 +1091,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::PORrr, X86::PORrm, TB_ALIGN_16 },
{ X86::PSADBWrr, X86::PSADBWrm, TB_ALIGN_16 },
{ X86::PSHUFBrr, X86::PSHUFBrm, TB_ALIGN_16 },
- { X86::PSIGNBrr, X86::PSIGNBrm, TB_ALIGN_16 },
- { X86::PSIGNWrr, X86::PSIGNWrm, TB_ALIGN_16 },
- { X86::PSIGNDrr, X86::PSIGNDrm, TB_ALIGN_16 },
+ { X86::PSIGNBrr128, X86::PSIGNBrm128, TB_ALIGN_16 },
+ { X86::PSIGNWrr128, X86::PSIGNWrm128, TB_ALIGN_16 },
+ { X86::PSIGNDrr128, X86::PSIGNDrm128, TB_ALIGN_16 },
{ X86::PSLLDrr, X86::PSLLDrm, TB_ALIGN_16 },
{ X86::PSLLQrr, X86::PSLLQrm, TB_ALIGN_16 },
{ X86::PSLLWrr, X86::PSLLWrm, TB_ALIGN_16 },
@@ -1298,6 +1316,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMINSDrr_Int, X86::VMINSDrm_Int, 0 },
{ X86::VMINSSrr, X86::VMINSSrm, 0 },
{ X86::VMINSSrr_Int, X86::VMINSSrm_Int, 0 },
+ { X86::VMOVLHPSrr, X86::VMOVHPSrm, TB_NO_REVERSE },
{ X86::VMPSADBWrri, X86::VMPSADBWrmi, 0 },
{ X86::VMULPDrr, X86::VMULPDrm, 0 },
{ X86::VMULPSrr, X86::VMULPSrm, 0 },
@@ -1319,7 +1338,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPADDUSBrr, X86::VPADDUSBrm, 0 },
{ X86::VPADDUSWrr, X86::VPADDUSWrm, 0 },
{ X86::VPADDWrr, X86::VPADDWrm, 0 },
- { X86::VPALIGNR128rr, X86::VPALIGNR128rm, 0 },
+ { X86::VPALIGNRrri, X86::VPALIGNRrmi, 0 },
{ X86::VPANDNrr, X86::VPANDNrm, 0 },
{ X86::VPANDrr, X86::VPANDrm, 0 },
{ X86::VPAVGBrr, X86::VPAVGBrm, 0 },
@@ -1371,9 +1390,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPORrr, X86::VPORrm, 0 },
{ X86::VPSADBWrr, X86::VPSADBWrm, 0 },
{ X86::VPSHUFBrr, X86::VPSHUFBrm, 0 },
- { X86::VPSIGNBrr, X86::VPSIGNBrm, 0 },
- { X86::VPSIGNWrr, X86::VPSIGNWrm, 0 },
- { X86::VPSIGNDrr, X86::VPSIGNDrm, 0 },
+ { X86::VPSIGNBrr128, X86::VPSIGNBrm128, 0 },
+ { X86::VPSIGNWrr128, X86::VPSIGNWrm128, 0 },
+ { X86::VPSIGNDrr128, X86::VPSIGNDrm128, 0 },
{ X86::VPSLLDrr, X86::VPSLLDrm, 0 },
{ X86::VPSLLQrr, X86::VPSLLQrm, 0 },
{ X86::VPSLLWrr, X86::VPSLLWrm, 0 },
@@ -1475,7 +1494,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPADDUSBYrr, X86::VPADDUSBYrm, 0 },
{ X86::VPADDUSWYrr, X86::VPADDUSWYrm, 0 },
{ X86::VPADDWYrr, X86::VPADDWYrm, 0 },
- { X86::VPALIGNR256rr, X86::VPALIGNR256rm, 0 },
+ { X86::VPALIGNRYrri, X86::VPALIGNRYrmi, 0 },
{ X86::VPANDNYrr, X86::VPANDNYrm, 0 },
{ X86::VPANDYrr, X86::VPANDYrm, 0 },
{ X86::VPAVGBYrr, X86::VPAVGBYrm, 0 },
@@ -1526,9 +1545,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPORYrr, X86::VPORYrm, 0 },
{ X86::VPSADBWYrr, X86::VPSADBWYrm, 0 },
{ X86::VPSHUFBYrr, X86::VPSHUFBYrm, 0 },
- { X86::VPSIGNBYrr, X86::VPSIGNBYrm, 0 },
- { X86::VPSIGNWYrr, X86::VPSIGNWYrm, 0 },
- { X86::VPSIGNDYrr, X86::VPSIGNDYrm, 0 },
+ { X86::VPSIGNBYrr256, X86::VPSIGNBYrm256, 0 },
+ { X86::VPSIGNWYrr256, X86::VPSIGNWYrm256, 0 },
+ { X86::VPSIGNDYrr256, X86::VPSIGNDYrm256, 0 },
{ X86::VPSLLDYrr, X86::VPSLLDYrm, 0 },
{ X86::VPSLLQYrr, X86::VPSLLQYrm, 0 },
{ X86::VPSLLWYrr, X86::VPSLLWYrm, 0 },
@@ -1540,6 +1559,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPSRAWYrr, X86::VPSRAWYrm, 0 },
{ X86::VPSRAVDrr, X86::VPSRAVDrm, 0 },
{ X86::VPSRAVDYrr, X86::VPSRAVDYrm, 0 },
+ { X86::VPSRAVD_Intrr, X86::VPSRAVD_Intrm, 0 },
+ { X86::VPSRAVD_IntYrr, X86::VPSRAVD_IntYrm, 0 },
{ X86::VPSRLDYrr, X86::VPSRLDYrm, 0 },
{ X86::VPSRLQYrr, X86::VPSRLQYrm, 0 },
{ X86::VPSRLWYrr, X86::VPSRLWYrm, 0 },
@@ -1600,8 +1621,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4mrY, TB_ALIGN_NONE },
// XOP foldable instructions
- { X86::VPCMOVrr, X86::VPCMOVmr, 0 },
- { X86::VPCMOVrrY, X86::VPCMOVmrY, 0 },
+ { X86::VPCMOVrrr, X86::VPCMOVrmr, 0 },
+ { X86::VPCMOVrrrY, X86::VPCMOVrmrY, 0 },
{ X86::VPCOMBri, X86::VPCOMBmi, 0 },
{ X86::VPCOMDri, X86::VPCOMDmi, 0 },
{ X86::VPCOMQri, X86::VPCOMQmi, 0 },
@@ -1626,7 +1647,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPMACSWWrr, X86::VPMACSWWrm, 0 },
{ X86::VPMADCSSWDrr, X86::VPMADCSSWDrm, 0 },
{ X86::VPMADCSWDrr, X86::VPMADCSWDrm, 0 },
- { X86::VPPERMrr, X86::VPPERMmr, 0 },
+ { X86::VPPERMrrr, X86::VPPERMrmr, 0 },
{ X86::VPROTBrr, X86::VPROTBrm, 0 },
{ X86::VPROTDrr, X86::VPROTDrm, 0 },
{ X86::VPROTQrr, X86::VPROTQrm, 0 },
@@ -1659,12 +1680,28 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
// AVX-512 foldable instructions
{ X86::VADDPSZrr, X86::VADDPSZrm, 0 },
{ X86::VADDPDZrr, X86::VADDPDZrm, 0 },
+ { X86::VADDSSZrr, X86::VADDSSZrm, 0 },
+ { X86::VADDSSZrr_Int, X86::VADDSSZrm_Int, 0 },
+ { X86::VADDSDZrr, X86::VADDSDZrm, 0 },
+ { X86::VADDSDZrr_Int, X86::VADDSDZrm_Int, 0 },
{ X86::VSUBPSZrr, X86::VSUBPSZrm, 0 },
{ X86::VSUBPDZrr, X86::VSUBPDZrm, 0 },
+ { X86::VSUBSSZrr, X86::VSUBSSZrm, 0 },
+ { X86::VSUBSSZrr_Int, X86::VSUBSSZrm_Int, 0 },
+ { X86::VSUBSDZrr, X86::VSUBSDZrm, 0 },
+ { X86::VSUBSDZrr_Int, X86::VSUBSDZrm_Int, 0 },
{ X86::VMULPSZrr, X86::VMULPSZrm, 0 },
{ X86::VMULPDZrr, X86::VMULPDZrm, 0 },
+ { X86::VMULSSZrr, X86::VMULSSZrm, 0 },
+ { X86::VMULSSZrr_Int, X86::VMULSSZrm_Int, 0 },
+ { X86::VMULSDZrr, X86::VMULSDZrm, 0 },
+ { X86::VMULSDZrr_Int, X86::VMULSDZrm_Int, 0 },
{ X86::VDIVPSZrr, X86::VDIVPSZrm, 0 },
{ X86::VDIVPDZrr, X86::VDIVPDZrm, 0 },
+ { X86::VDIVSSZrr, X86::VDIVSSZrm, 0 },
+ { X86::VDIVSSZrr_Int, X86::VDIVSSZrm_Int, 0 },
+ { X86::VDIVSDZrr, X86::VDIVSDZrm, 0 },
+ { X86::VDIVSDZrr_Int, X86::VDIVSDZrm_Int, 0 },
{ X86::VMINPSZrr, X86::VMINPSZrm, 0 },
{ X86::VMINPDZrr, X86::VMINPDZrm, 0 },
{ X86::VMAXPSZrr, X86::VMAXPSZrm, 0 },
@@ -1902,13 +1939,13 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4rmY, TB_ALIGN_NONE },
// XOP foldable instructions
- { X86::VPCMOVrr, X86::VPCMOVrm, 0 },
- { X86::VPCMOVrrY, X86::VPCMOVrmY, 0 },
+ { X86::VPCMOVrrr, X86::VPCMOVrrm, 0 },
+ { X86::VPCMOVrrrY, X86::VPCMOVrrmY, 0 },
{ X86::VPERMIL2PDrr, X86::VPERMIL2PDrm, 0 },
{ X86::VPERMIL2PDrrY, X86::VPERMIL2PDrmY, 0 },
{ X86::VPERMIL2PSrr, X86::VPERMIL2PSrm, 0 },
{ X86::VPERMIL2PSrrY, X86::VPERMIL2PSrmY, 0 },
- { X86::VPPERMrr, X86::VPPERMrm, 0 },
+ { X86::VPPERMrrr, X86::VPPERMrrm, 0 },
// AVX-512 VPERMI instructions with 3 source operands.
{ X86::VPERMI2Drr, X86::VPERMI2Drm, 0 },
@@ -2025,7 +2062,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
void
X86InstrInfo::AddTableEntry(RegOp2MemOpTableType &R2MTable,
MemOp2RegOpTableType &M2RTable,
- unsigned RegOp, unsigned MemOp, unsigned Flags) {
+ uint16_t RegOp, uint16_t MemOp, uint16_t Flags) {
if ((Flags & TB_NO_FORWARD) == 0) {
assert(!R2MTable.count(RegOp) && "Duplicate entry!");
R2MTable[RegOp] = std::make_pair(MemOp, Flags);
@@ -2085,19 +2122,19 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
return false;
}
-int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
- const MachineFunction *MF = MI->getParent()->getParent();
+int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
+ const MachineFunction *MF = MI.getParent()->getParent();
const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
- if (MI->getOpcode() == getCallFrameSetupOpcode() ||
- MI->getOpcode() == getCallFrameDestroyOpcode()) {
+ if (MI.getOpcode() == getCallFrameSetupOpcode() ||
+ MI.getOpcode() == getCallFrameDestroyOpcode()) {
unsigned StackAlign = TFI->getStackAlignment();
- int SPAdj = (MI->getOperand(0).getImm() + StackAlign - 1) / StackAlign *
- StackAlign;
+ int SPAdj =
+ (MI.getOperand(0).getImm() + StackAlign - 1) / StackAlign * StackAlign;
- SPAdj -= MI->getOperand(1).getImm();
+ SPAdj -= MI.getOperand(1).getImm();
- if (MI->getOpcode() == getCallFrameSetupOpcode())
+ if (MI.getOpcode() == getCallFrameSetupOpcode())
return SPAdj;
else
return -SPAdj;
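
A worked instance of the rounding above (illustrative numbers): with
StackAlign = 16, a frame-setup amount of 20 bytes gives
SPAdj = (20 + 15) / 16 * 16 = 32; if operand 1 records 8 bytes already
adjusted, the result is 32 - 8 = 24 for the setup opcode and -24 for the
matching destroy.
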
@@ -2106,8 +2143,8 @@ int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
// To know whether a call adjusts the stack, we need information
// that is bound to the following ADJCALLSTACKUP pseudo.
// Look for the next ADJCALLSTACKUP that follows the call.
- if (MI->isCall()) {
- const MachineBasicBlock* MBB = MI->getParent();
+ if (MI.isCall()) {
+ const MachineBasicBlock *MBB = MI.getParent();
auto I = ++MachineBasicBlock::const_iterator(MI);
for (auto E = MBB->end(); I != E; ++I) {
if (I->getOpcode() == getCallFrameDestroyOpcode() ||
@@ -2125,7 +2162,7 @@ int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
// Currently handle only PUSHes we can reasonably expect to see
// in call sequences
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
return 0;
case X86::PUSH32i8:
@@ -2134,21 +2171,27 @@ int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
case X86::PUSH32rmr:
case X86::PUSHi32:
return 4;
+ case X86::PUSH64i8:
+ case X86::PUSH64r:
+ case X86::PUSH64rmm:
+ case X86::PUSH64rmr:
+ case X86::PUSH64i32:
+ return 8;
}
}
/// Return true and the FrameIndex if the specified
/// operand and follow operands form a reference to the stack frame.
-bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
+bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
int &FrameIndex) const {
- if (MI->getOperand(Op+X86::AddrBaseReg).isFI() &&
- MI->getOperand(Op+X86::AddrScaleAmt).isImm() &&
- MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
- MI->getOperand(Op+X86::AddrDisp).isImm() &&
- MI->getOperand(Op+X86::AddrScaleAmt).getImm() == 1 &&
- MI->getOperand(Op+X86::AddrIndexReg).getReg() == 0 &&
- MI->getOperand(Op+X86::AddrDisp).getImm() == 0) {
- FrameIndex = MI->getOperand(Op+X86::AddrBaseReg).getIndex();
+ if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
+ MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
+ MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
+ MI.getOperand(Op + X86::AddrDisp).isImm() &&
+ MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
+ MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
+ MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
+ FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
return true;
}
return false;
@@ -2166,13 +2209,19 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::MOVSSrm:
case X86::MOVSDrm:
case X86::MOVAPSrm:
+ case X86::MOVUPSrm:
case X86::MOVAPDrm:
+ case X86::MOVUPDrm:
case X86::MOVDQArm:
+ case X86::MOVDQUrm:
case X86::VMOVSSrm:
case X86::VMOVSDrm:
case X86::VMOVAPSrm:
+ case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
+ case X86::VMOVUPDrm:
case X86::VMOVDQArm:
+ case X86::VMOVDQUrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPDYrm:
@@ -2181,8 +2230,42 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::VMOVDQAYrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
+ case X86::VMOVSSZrm:
+ case X86::VMOVSDZrm:
case X86::VMOVAPSZrm:
+ case X86::VMOVAPSZ128rm:
+ case X86::VMOVAPSZ256rm:
case X86::VMOVUPSZrm:
+ case X86::VMOVUPSZ128rm:
+ case X86::VMOVUPSZ256rm:
+ case X86::VMOVAPDZrm:
+ case X86::VMOVAPDZ128rm:
+ case X86::VMOVAPDZ256rm:
+ case X86::VMOVUPDZrm:
+ case X86::VMOVUPDZ128rm:
+ case X86::VMOVUPDZ256rm:
+ case X86::VMOVDQA32Zrm:
+ case X86::VMOVDQA32Z128rm:
+ case X86::VMOVDQA32Z256rm:
+ case X86::VMOVDQU32Zrm:
+ case X86::VMOVDQU32Z128rm:
+ case X86::VMOVDQU32Z256rm:
+ case X86::VMOVDQA64Zrm:
+ case X86::VMOVDQA64Z128rm:
+ case X86::VMOVDQA64Z256rm:
+ case X86::VMOVDQU64Zrm:
+ case X86::VMOVDQU64Z128rm:
+ case X86::VMOVDQU64Z256rm:
+ case X86::VMOVDQU8Zrm:
+ case X86::VMOVDQU8Z128rm:
+ case X86::VMOVDQU8Z256rm:
+ case X86::VMOVDQU16Zrm:
+ case X86::VMOVDQU16Z128rm:
+ case X86::VMOVDQU16Z256rm:
+ case X86::KMOVBkm:
+ case X86::KMOVWkm:
+ case X86::KMOVDkm:
+ case X86::KMOVQkm:
return true;
}
}
@@ -2198,40 +2281,80 @@ static bool isFrameStoreOpcode(int Opcode) {
case X86::MOVSSmr:
case X86::MOVSDmr:
case X86::MOVAPSmr:
+ case X86::MOVUPSmr:
case X86::MOVAPDmr:
+ case X86::MOVUPDmr:
case X86::MOVDQAmr:
+ case X86::MOVDQUmr:
case X86::VMOVSSmr:
case X86::VMOVSDmr:
case X86::VMOVAPSmr:
+ case X86::VMOVUPSmr:
case X86::VMOVAPDmr:
+ case X86::VMOVUPDmr:
case X86::VMOVDQAmr:
+ case X86::VMOVDQUmr:
case X86::VMOVUPSYmr:
case X86::VMOVAPSYmr:
case X86::VMOVUPDYmr:
case X86::VMOVAPDYmr:
case X86::VMOVDQUYmr:
case X86::VMOVDQAYmr:
+ case X86::VMOVSSZmr:
+ case X86::VMOVSDZmr:
case X86::VMOVUPSZmr:
+ case X86::VMOVUPSZ128mr:
+ case X86::VMOVUPSZ256mr:
case X86::VMOVAPSZmr:
+ case X86::VMOVAPSZ128mr:
+ case X86::VMOVAPSZ256mr:
+ case X86::VMOVUPDZmr:
+ case X86::VMOVUPDZ128mr:
+ case X86::VMOVUPDZ256mr:
+ case X86::VMOVAPDZmr:
+ case X86::VMOVAPDZ128mr:
+ case X86::VMOVAPDZ256mr:
+ case X86::VMOVDQA32Zmr:
+ case X86::VMOVDQA32Z128mr:
+ case X86::VMOVDQA32Z256mr:
+ case X86::VMOVDQU32Zmr:
+ case X86::VMOVDQU32Z128mr:
+ case X86::VMOVDQU32Z256mr:
+ case X86::VMOVDQA64Zmr:
+ case X86::VMOVDQA64Z128mr:
+ case X86::VMOVDQA64Z256mr:
+ case X86::VMOVDQU64Zmr:
+ case X86::VMOVDQU64Z128mr:
+ case X86::VMOVDQU64Z256mr:
+ case X86::VMOVDQU8Zmr:
+ case X86::VMOVDQU8Z128mr:
+ case X86::VMOVDQU8Z256mr:
+ case X86::VMOVDQU16Zmr:
+ case X86::VMOVDQU16Z128mr:
+ case X86::VMOVDQU16Z256mr:
case X86::MMX_MOVD64mr:
case X86::MMX_MOVQ64mr:
case X86::MMX_MOVNTQmr:
+ case X86::KMOVBmk:
+ case X86::KMOVWmk:
+ case X86::KMOVDmk:
+ case X86::KMOVQmk:
return true;
}
return false;
}
-unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (isFrameLoadOpcode(MI->getOpcode()))
- if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
- return MI->getOperand(0).getReg();
+ if (isFrameLoadOpcode(MI.getOpcode()))
+ if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
+ return MI.getOperand(0).getReg();
return 0;
}
-unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
+unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
- if (isFrameLoadOpcode(MI->getOpcode())) {
+ if (isFrameLoadOpcode(MI.getOpcode())) {
unsigned Reg;
if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
return Reg;
@@ -2242,18 +2365,18 @@ unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
return 0;
}
-unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (isFrameStoreOpcode(MI->getOpcode()))
- if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
+ if (isFrameStoreOpcode(MI.getOpcode()))
+ if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
isFrameOperand(MI, 0, FrameIndex))
- return MI->getOperand(X86::AddrNumOperands).getReg();
+ return MI.getOperand(X86::AddrNumOperands).getReg();
return 0;
}
-unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
+unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
- if (isFrameStoreOpcode(MI->getOpcode())) {
+ if (isFrameStoreOpcode(MI.getOpcode())) {
unsigned Reg;
if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
return Reg;
@@ -2281,10 +2404,9 @@ static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
return isPICBase;
}
-bool
-X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
- AliasAnalysis *AA) const {
- switch (MI->getOpcode()) {
+bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
+ AliasAnalysis *AA) const {
+ switch (MI.getOpcode()) {
default: break;
case X86::MOV8rm:
case X86::MOV16rm:
@@ -2345,18 +2467,18 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
case X86::VMOVUPSZ256rm:
case X86::VMOVUPSZrm: {
// Loads from constant pools are trivially rematerializable.
- if (MI->getOperand(1+X86::AddrBaseReg).isReg() &&
- MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
- MI->getOperand(1+X86::AddrIndexReg).isReg() &&
- MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
- MI->isInvariantLoad(AA)) {
- unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
+ if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
+ MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
+ MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
+ MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
+ MI.isInvariantLoad(AA)) {
+ unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
if (BaseReg == 0 || BaseReg == X86::RIP)
return true;
// Allow re-materialization of PIC load.
- if (!ReMatPICStubLoad && MI->getOperand(1+X86::AddrDisp).isGlobal())
+ if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
return false;
- const MachineFunction &MF = *MI->getParent()->getParent();
+ const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
return regIsPICBase(BaseReg, MRI);
}
@@ -2365,18 +2487,18 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
case X86::LEA32r:
case X86::LEA64r: {
- if (MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
- MI->getOperand(1+X86::AddrIndexReg).isReg() &&
- MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
- !MI->getOperand(1+X86::AddrDisp).isReg()) {
+ if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
+ MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
+ MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
+ !MI.getOperand(1 + X86::AddrDisp).isReg()) {
// lea fi#, lea GV, etc. are all rematerializable.
- if (!MI->getOperand(1+X86::AddrBaseReg).isReg())
+ if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
return true;
- unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
+ unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
if (BaseReg == 0)
return true;
// Allow re-materialization of lea PICBase + x.
- const MachineFunction &MF = *MI->getParent()->getParent();
+ const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
return regIsPICBase(BaseReg, MRI);
}
@@ -2469,10 +2591,10 @@ bool X86InstrInfo::isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
+ const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const {
bool ClobbersEFLAGS = false;
- for (const MachineOperand &MO : Orig->operands()) {
+ for (const MachineOperand &MO : Orig.operands()) {
if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) {
ClobbersEFLAGS = true;
break;
@@ -2483,7 +2605,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
// The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
// effects.
int Value;
- switch (Orig->getOpcode()) {
+ switch (Orig.getOpcode()) {
case X86::MOV32r0: Value = 0; break;
case X86::MOV32r1: Value = 1; break;
case X86::MOV32r_1: Value = -1; break;
@@ -2491,22 +2613,23 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
llvm_unreachable("Unexpected instruction!");
}
- DebugLoc DL = Orig->getDebugLoc();
- BuildMI(MBB, I, DL, get(X86::MOV32ri)).addOperand(Orig->getOperand(0))
- .addImm(Value);
+ const DebugLoc &DL = Orig.getDebugLoc();
+ BuildMI(MBB, I, DL, get(X86::MOV32ri))
+ .addOperand(Orig.getOperand(0))
+ .addImm(Value);
} else {
- MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
+ MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
MBB.insert(I, MI);
}
- MachineInstr *NewMI = std::prev(I);
- NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
+ MachineInstr &NewMI = *std::prev(I);
+ NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
-bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr *MI) const {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isDef() &&
MO.getReg() == X86::EFLAGS && !MO.isDead()) {
return true;
@@ -2516,11 +2639,11 @@ bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr *MI) const {
}
/// Check whether the shift count for a machine operand is non-zero.
-inline static unsigned getTruncatedShiftCount(MachineInstr *MI,
+inline static unsigned getTruncatedShiftCount(MachineInstr &MI,
unsigned ShiftAmtOperandIdx) {
// The shift count is six bits with the REX.W prefix and five bits without.
- unsigned ShiftCountMask = (MI->getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
- unsigned Imm = MI->getOperand(ShiftAmtOperandIdx).getImm();
+ unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
+ unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
return Imm & ShiftCountMask;
}
@@ -2535,11 +2658,11 @@ inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
return ShAmt < 4 && ShAmt > 0;
}
-bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
- unsigned Opc, bool AllowSP,
- unsigned &NewSrc, bool &isKill, bool &isUndef,
+bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
+ unsigned Opc, bool AllowSP, unsigned &NewSrc,
+ bool &isKill, bool &isUndef,
MachineOperand &ImplicitOp) const {
- MachineFunction &MF = *MI->getParent()->getParent();
+ MachineFunction &MF = *MI.getParent()->getParent();
const TargetRegisterClass *RC;
if (AllowSP) {
RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
@@ -2571,7 +2694,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
MachineBasicBlock::LivenessQueryResult LQR =
- MI->getParent()->computeRegisterLiveness(&getRegisterInfo(), NewSrc, MI);
+ MI.getParent()->computeRegisterLiveness(&getRegisterInfo(), NewSrc, MI);
switch (LQR) {
case MachineBasicBlock::LQR_Unknown:
@@ -2579,7 +2702,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
// formation.
return false;
case MachineBasicBlock::LQR_Live:
- isKill = MI->killsRegister(SrcReg);
+ isKill = MI.killsRegister(SrcReg);
isUndef = false;
break;
default:
@@ -2592,9 +2715,8 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
// Virtual register of the wrong class, we have to create a temporary 64-bit
// vreg to feed into the LEA.
NewSrc = MF.getRegInfo().createVirtualRegister(RC);
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- get(TargetOpcode::COPY))
- .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
.addOperand(Src);
// Which is obviously going to be dead after we're done with it.
@@ -2609,16 +2731,14 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
/// Helper for convertToThreeAddress when 16-bit LEA is disabled; uses 32-bit
/// LEA to form 3-address code by promoting to a 32-bit superregister and then
/// truncating back down to a 16-bit subregister.
-MachineInstr *
-X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
- MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const {
- MachineInstr *MI = MBBI;
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isKill = MI->getOperand(1).isKill();
+MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
+ unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
+ LiveVariables *LV) const {
+ MachineBasicBlock::iterator MBBI = MI.getIterator();
+ unsigned Dest = MI.getOperand(0).getReg();
+ unsigned Src = MI.getOperand(1).getReg();
+ bool isDead = MI.getOperand(0).isDead();
+ bool isKill = MI.getOperand(1).isKill();
MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
@@ -2638,19 +2758,19 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
// leal -65(%rdx), %esi
// But testing has shown this *does* help performance in 64-bit mode (at
// least on modern x86 machines).
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
+ BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
MachineInstr *InsMI =
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
- .addReg(leaInReg, RegState::Define, X86::sub_16bit)
- .addReg(Src, getKillRegState(isKill));
+ BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(leaInReg, RegState::Define, X86::sub_16bit)
+ .addReg(Src, getKillRegState(isKill));
- MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
- get(Opc), leaOutReg);
+ MachineInstrBuilder MIB =
+ BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opc), leaOutReg);
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::SHL16ri: {
- unsigned ShAmt = MI->getOperand(2).getImm();
- MIB.addReg(0).addImm(1 << ShAmt)
+ unsigned ShAmt = MI.getOperand(2).getImm();
+ MIB.addReg(0).addImm(1ULL << ShAmt)
.addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
break;
}
@@ -2664,12 +2784,12 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
- addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
+ addRegOffset(MIB, leaInReg, true, MI.getOperand(2).getImm());
break;
case X86::ADD16rr:
case X86::ADD16rr_DB: {
- unsigned Src2 = MI->getOperand(2).getReg();
- bool isKill2 = MI->getOperand(2).isKill();
+ unsigned Src2 = MI.getOperand(2).getReg();
+ bool isKill2 = MI.getOperand(2).isKill();
unsigned leaInReg2 = 0;
MachineInstr *InsMI2 = nullptr;
if (Src == Src2) {
@@ -2683,33 +2803,32 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
// Build and insert into an implicit UNDEF value. This is OK because
// we'll be shifting and then extracting the lower 16 bits.
- BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF),leaInReg2);
- InsMI2 =
- BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
- .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
- .addReg(Src2, getKillRegState(isKill2));
+ BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
+ InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
+ .addReg(Src2, getKillRegState(isKill2));
addRegReg(MIB, leaInReg, true, leaInReg2, true);
}
if (LV && isKill2 && InsMI2)
- LV->replaceKillInstruction(Src2, MI, InsMI2);
+ LV->replaceKillInstruction(Src2, MI, *InsMI2);
break;
}
}
MachineInstr *NewMI = MIB;
MachineInstr *ExtMI =
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
- .addReg(Dest, RegState::Define | getDeadRegState(isDead))
- .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
+ BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(Dest, RegState::Define | getDeadRegState(isDead))
+ .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
if (LV) {
// Update live variables
LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
if (isKill)
- LV->replaceKillInstruction(Src, MI, InsMI);
+ LV->replaceKillInstruction(Src, MI, *InsMI);
if (isDead)
- LV->replaceKillInstruction(Dest, MI, ExtMI);
+ LV->replaceKillInstruction(Dest, MI, *ExtMI);
}
return ExtMI;
@@ -2727,20 +2846,17 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const {
- MachineInstr *MI = MBBI;
-
+ MachineInstr &MI, LiveVariables *LV) const {
// The following opcodes also set the condition code register(s). Only
// convert them to equivalent lea if the condition code register defs
// are dead!
if (hasLiveCondCodeDef(MI))
return nullptr;
- MachineFunction &MF = *MI->getParent()->getParent();
+ MachineFunction &MF = *MI.getParent()->getParent();
// All input instructions are two-addr instructions. Get the known operands.
- const MachineOperand &Dest = MI->getOperand(0);
- const MachineOperand &Src = MI->getOperand(1);
+ const MachineOperand &Dest = MI.getOperand(0);
+ const MachineOperand &Src = MI.getOperand(1);
MachineInstr *NewMI = nullptr;
// FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
@@ -2749,11 +2865,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
bool DisableLEA16 = true;
bool is64Bit = Subtarget.is64Bit();
- unsigned MIOpc = MI->getOpcode();
+ unsigned MIOpc = MI.getOpcode();
switch (MIOpc) {
default: return nullptr;
case X86::SHL64ri: {
- assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
@@ -2763,13 +2879,17 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
&X86::GR64_NOSPRegClass))
return nullptr;
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
- .addOperand(Dest)
- .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
+ NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
+ .addOperand(Dest)
+ .addReg(0)
+ .addImm(1ULL << ShAmt)
+ .addOperand(Src)
+ .addImm(0)
+ .addReg(0);
break;
}
case X86::SHL32ri: {
- assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
@@ -2783,11 +2903,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(0).addImm(1 << ShAmt)
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
- .addImm(0).addReg(0);
+ MachineInstrBuilder MIB =
+ BuildMI(MF, MI.getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(0)
+ .addImm(1ULL << ShAmt)
+ .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
+ .addImm(0)
+ .addReg(0);
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
NewMI = MIB;
@@ -2795,20 +2918,25 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
break;
}
case X86::SHL16ri: {
- assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : nullptr;
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest)
- .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
+ : nullptr;
+ NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest)
+ .addReg(0)
+ .addImm(1ULL << ShAmt)
+ .addOperand(Src)
+ .addImm(0)
+ .addReg(0);
break;
}
case X86::INC64r:
case X86::INC32r: {
- assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
+ assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
bool isKill, isUndef;
@@ -2818,9 +2946,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
+ MachineInstrBuilder MIB =
+ BuildMI(MF, MI.getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg,
+ getKillRegState(isKill) | getUndefRegState(isUndef));
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
@@ -2829,15 +2959,17 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
case X86::INC16r:
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
- assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest).addOperand(Src), 1);
+ assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
+ NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest)
+ .addOperand(Src),
+ 1);
break;
case X86::DEC64r:
case X86::DEC32r: {
- assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
+ assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
@@ -2848,9 +2980,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
+ MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getUndefRegState(isUndef) |
+ getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
@@ -2860,17 +2993,19 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
case X86::DEC16r:
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
- assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest).addOperand(Src), -1);
+ assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
+ NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest)
+ .addOperand(Src),
+ -1);
break;
case X86::ADD64rr:
case X86::ADD64rr_DB:
case X86::ADD32rr:
case X86::ADD32rr_DB: {
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc;
if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
Opc = X86::LEA64r;
@@ -2884,7 +3019,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- const MachineOperand &Src2 = MI->getOperand(2);
+ const MachineOperand &Src2 = MI.getOperand(2);
bool isKill2, isUndef2;
unsigned SrcReg2;
MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
@@ -2892,8 +3027,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg2, isKill2, isUndef2, ImplicitOp2))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest);
+ MachineInstrBuilder MIB =
+ BuildMI(MF, MI.getDebugLoc(), get(Opc)).addOperand(Dest);
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
if (ImplicitOp2.getReg() != 0)
@@ -2906,45 +3041,46 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
NewMI->getOperand(3).setIsUndef(isUndef2);
if (LV && Src2.isKill())
- LV->replaceKillInstruction(SrcReg2, MI, NewMI);
+ LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
break;
}
case X86::ADD16rr:
case X86::ADD16rr_DB: {
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- unsigned Src2 = MI->getOperand(2).getReg();
- bool isKill2 = MI->getOperand(2).isKill();
- NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest),
- Src.getReg(), Src.isKill(), Src2, isKill2);
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
+ unsigned Src2 = MI.getOperand(2).getReg();
+ bool isKill2 = MI.getOperand(2).isKill();
+ NewMI = addRegReg(
+ BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).addOperand(Dest),
+ Src.getReg(), Src.isKill(), Src2, isKill2);
// Preserve undefness of the operands.
- bool isUndef = MI->getOperand(1).isUndef();
- bool isUndef2 = MI->getOperand(2).isUndef();
+ bool isUndef = MI.getOperand(1).isUndef();
+ bool isUndef2 = MI.getOperand(2).isUndef();
NewMI->getOperand(1).setIsUndef(isUndef);
NewMI->getOperand(3).setIsUndef(isUndef2);
if (LV && isKill2)
- LV->replaceKillInstruction(Src2, MI, NewMI);
+ LV->replaceKillInstruction(Src2, MI, *NewMI);
break;
}
case X86::ADD64ri32:
case X86::ADD64ri8:
case X86::ADD64ri32_DB:
case X86::ADD64ri8_DB:
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
- .addOperand(Dest).addOperand(Src),
- MI->getOperand(2).getImm());
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
+ NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
+ .addOperand(Dest)
+ .addOperand(Src),
+ MI.getOperand(2).getImm());
break;
case X86::ADD32ri:
case X86::ADD32ri8:
case X86::ADD32ri_DB:
case X86::ADD32ri8_DB: {
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
bool isKill, isUndef;
@@ -2954,13 +3090,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
+ MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getUndefRegState(isUndef) |
+ getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
- NewMI = addOffset(MIB, MI->getOperand(2).getImm());
+ NewMI = addOffset(MIB, MI.getOperand(2).getImm());
break;
}
case X86::ADD16ri:
@@ -2968,12 +3105,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest).addOperand(Src),
- MI->getOperand(2).getImm());
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
+ NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest)
+ .addOperand(Src),
+ MI.getOperand(2).getImm());
break;
}
@@ -2981,12 +3119,12 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (LV) { // Update live variables
if (Src.isKill())
- LV->replaceKillInstruction(Src.getReg(), MI, NewMI);
+ LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
if (Dest.isDead())
- LV->replaceKillInstruction(Dest.getReg(), MI, NewMI);
+ LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
}
- MFI->insert(MBBI, NewMI); // Insert the new inst
+ MFI->insert(MI.getIterator(), NewMI); // Insert the new inst
return NewMI;
}
@@ -3142,11 +3280,16 @@ static bool isFMA3(unsigned Opcode, bool *IsIntrinsic = nullptr) {
llvm_unreachable("Opcode not handled by the switch");
}
-MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
- bool NewMI,
+MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
- switch (MI->getOpcode()) {
+ auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
+ if (NewMI)
+ return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
+ return MI;
+ };
+
+ switch (MI.getOpcode()) {
case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
@@ -3155,7 +3298,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
unsigned Opc;
unsigned Size;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
@@ -3164,15 +3307,12 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
}
- unsigned Amt = MI->getOperand(3).getImm();
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->setDesc(get(Opc));
- MI->getOperand(3).setImm(Size-Amt);
- return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+ unsigned Amt = MI.getOperand(3).getImm();
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.setDesc(get(Opc));
+ WorkingMI.getOperand(3).setImm(Size - Amt);
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
case X86::BLENDPDrri:
case X86::BLENDPSrri:
@@ -3186,7 +3326,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::VPBLENDDYrri:
case X86::VPBLENDWYrri:{
unsigned Mask;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::BLENDPDrri: Mask = 0x03; break;
case X86::BLENDPSrri: Mask = 0x0F; break;
@@ -3201,29 +3341,23 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::VPBLENDWYrri: Mask = 0xFF; break;
}
// Only the least significant bits of Imm are used.
- unsigned Imm = MI->getOperand(3).getImm() & Mask;
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->getOperand(3).setImm(Mask ^ Imm);
- return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+ unsigned Imm = MI.getOperand(3).getImm() & Mask;
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.getOperand(3).setImm(Mask ^ Imm);
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
case X86::PCLMULQDQrr:
case X86::VPCLMULQDQrr:{
// SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
// SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
- unsigned Imm = MI->getOperand(3).getImm();
+ unsigned Imm = MI.getOperand(3).getImm();
unsigned Src1Hi = Imm & 0x01;
unsigned Src2Hi = Imm & 0x10;
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
- return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
case X86::CMPPDrri:
case X86::CMPPSrri:
@@ -3233,17 +3367,12 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::VCMPPSYrri: {
// Float comparison can be safely commuted for
// Ordered/Unordered/Equal/NotEqual tests
- unsigned Imm = MI->getOperand(3).getImm() & 0x7;
+ unsigned Imm = MI.getOperand(3).getImm() & 0x7;
switch (Imm) {
case 0x00: // EQUAL
case 0x03: // UNORDERED
case 0x04: // NOT EQUAL
case 0x07: // ORDERED
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
default:
return nullptr;
@@ -3254,7 +3383,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::VPCOMQri: case X86::VPCOMUQri:
case X86::VPCOMWri: case X86::VPCOMUWri: {
// Flip comparison mode immediate (if necessary).
- unsigned Imm = MI->getOperand(3).getImm() & 0x7;
+ unsigned Imm = MI.getOperand(3).getImm() & 0x7;
switch (Imm) {
case 0x00: Imm = 0x02; break; // LT -> GT
case 0x01: Imm = 0x03; break; // LE -> GE
@@ -3267,13 +3396,21 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
default:
break;
}
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->getOperand(3).setImm(Imm);
- return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.getOperand(3).setImm(Imm);
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
+ }
+ case X86::VPERM2F128rr:
+ case X86::VPERM2I128rr: {
+ // Flip permute source immediate.
+ // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
+ // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
+ unsigned Imm = MI.getOperand(3).getImm() & 0xFF;
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.getOperand(3).setImm(Imm ^ 0x22);
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr:
case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
@@ -3292,7 +3429,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr:
case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: {
unsigned Opc;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
@@ -3343,31 +3480,27 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
}
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->setDesc(get(Opc));
- // Fallthrough intended.
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.setDesc(get(Opc));
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
default:
- if (isFMA3(MI->getOpcode())) {
+ if (isFMA3(MI.getOpcode())) {
unsigned Opc = getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2);
if (Opc == 0)
return nullptr;
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->setDesc(get(Opc));
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.setDesc(get(Opc));
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
+
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
}
-bool X86InstrInfo::findFMA3CommutedOpIndices(MachineInstr *MI,
+bool X86InstrInfo::findFMA3CommutedOpIndices(MachineInstr &MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
@@ -3402,12 +3535,12 @@ bool X86InstrInfo::findFMA3CommutedOpIndices(MachineInstr *MI,
// CommutableOpIdx2 is well defined now. Let's choose another commutable
// operand and assign its index to CommutableOpIdx1.
- unsigned Op2Reg = MI->getOperand(CommutableOpIdx2).getReg();
+ unsigned Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
for (CommutableOpIdx1 = RegOpsNum; CommutableOpIdx1 > 0; CommutableOpIdx1--) {
// The commuted operands must have different registers.
// Otherwise, the commute transformation does not change anything and
// is useless then.
- if (Op2Reg != MI->getOperand(CommutableOpIdx1).getReg())
+ if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
break;
}
@@ -3427,14 +3560,13 @@ bool X86InstrInfo::findFMA3CommutedOpIndices(MachineInstr *MI,
return getFMA3OpcodeToCommuteOperands(MI, SrcOpIdx1, SrcOpIdx2) != 0;
}
-unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
- unsigned SrcOpIdx1,
- unsigned SrcOpIdx2) const {
- unsigned Opc = MI->getOpcode();
+unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
+ MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2) const {
+ unsigned Opc = MI.getOpcode();
// Define the array that holds FMA opcodes in groups
// of 3 opcodes (132, 213, 231) in each group.
- static const unsigned RegularOpcodeGroups[][3] = {
+ static const uint16_t RegularOpcodeGroups[][3] = {
{ X86::VFMADDSSr132r, X86::VFMADDSSr213r, X86::VFMADDSSr231r },
{ X86::VFMADDSDr132r, X86::VFMADDSDr213r, X86::VFMADDSDr231r },
{ X86::VFMADDPSr132r, X86::VFMADDPSr213r, X86::VFMADDPSr231r },
@@ -3508,7 +3640,7 @@ unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
// Define the array that holds FMA*_Int opcodes in groups
// of 3 opcodes (132, 213, 231) in each group.
- static const unsigned IntrinOpcodeGroups[][3] = {
+ static const uint16_t IntrinOpcodeGroups[][3] = {
{ X86::VFMADDSSr132r_Int, X86::VFMADDSSr213r_Int, X86::VFMADDSSr231r_Int },
{ X86::VFMADDSDr132r_Int, X86::VFMADDSDr213r_Int, X86::VFMADDSDr231r_Int },
{ X86::VFMADDSSr132m_Int, X86::VFMADDSSr213m_Int, X86::VFMADDSSr231m_Int },
@@ -3539,7 +3671,7 @@ unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
isFMA3(Opc, &IsIntrinOpcode);
size_t GroupsNum;
- const unsigned (*OpcodeGroups)[3];
+ const uint16_t (*OpcodeGroups)[3];
if (IsIntrinOpcode) {
GroupsNum = array_lengthof(IntrinOpcodeGroups);
OpcodeGroups = IntrinOpcodeGroups;
@@ -3548,7 +3680,7 @@ unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
OpcodeGroups = RegularOpcodeGroups;
}
- const unsigned *FoundOpcodesGroup = nullptr;
+ const uint16_t *FoundOpcodesGroup = nullptr;
size_t FormIndex;
// Look for the input opcode in the corresponding opcodes table.
@@ -3616,34 +3748,33 @@ unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
return FoundOpcodesGroup[FormIndex];
}
-bool X86InstrInfo::findCommutedOpIndices(MachineInstr *MI,
- unsigned &SrcOpIdx1,
+bool X86InstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
- switch (MI->getOpcode()) {
- case X86::CMPPDrri:
- case X86::CMPPSrri:
- case X86::VCMPPDrri:
- case X86::VCMPPSrri:
- case X86::VCMPPDYrri:
- case X86::VCMPPSYrri: {
- // Float comparison can be safely commuted for
- // Ordered/Unordered/Equal/NotEqual tests
- unsigned Imm = MI->getOperand(3).getImm() & 0x7;
- switch (Imm) {
- case 0x00: // EQUAL
- case 0x03: // UNORDERED
- case 0x04: // NOT EQUAL
- case 0x07: // ORDERED
- // The indices of the commutable operands are 1 and 2.
- // Assign them to the returned operand indices here.
- return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
- }
- return false;
+ switch (MI.getOpcode()) {
+ case X86::CMPPDrri:
+ case X86::CMPPSrri:
+ case X86::VCMPPDrri:
+ case X86::VCMPPSrri:
+ case X86::VCMPPDYrri:
+ case X86::VCMPPSYrri: {
+ // Float comparison can be safely commuted for
+ // Ordered/Unordered/Equal/NotEqual tests
+ unsigned Imm = MI.getOperand(3).getImm() & 0x7;
+ switch (Imm) {
+ case 0x00: // EQUAL
+ case 0x03: // UNORDERED
+ case 0x04: // NOT EQUAL
+ case 0x07: // ORDERED
+ // The indices of the commutable operands are 1 and 2.
+ // Assign them to the returned operand indices here.
+ return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
}
- default:
- if (isFMA3(MI->getOpcode()))
- return findFMA3CommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
- return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
+ return false;
+ }
+ default:
+ if (isFMA3(MI.getOpcode()))
+ return findFMA3CommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
+ return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
return false;
}
@@ -3791,6 +3922,8 @@ X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
case X86::COND_NP: return X86::COND_P;
case X86::COND_O: return X86::COND_NO;
case X86::COND_NO: return X86::COND_O;
+ case X86::COND_NE_OR_P: return X86::COND_E_AND_NP;
+ case X86::COND_E_AND_NP: return X86::COND_NE_OR_P;
}
}
@@ -3887,17 +4020,38 @@ unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes,
}
}
-bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- if (!MI->isTerminator()) return false;
+bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
+ if (!MI.isTerminator()) return false;
// Conditional branch is a special case.
- if (MI->isBranch() && !MI->isBarrier())
+ if (MI.isBranch() && !MI.isBarrier())
return true;
- if (!MI->isPredicable())
+ if (!MI.isPredicable())
return true;
return !isPredicated(MI);
}
+// Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
+// not be a fallthrough MBB now due to layout changes). Return nullptr if the
+// fallthrough MBB cannot be identified.
+static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
+ MachineBasicBlock *TBB) {
+ // Look for non-EHPad successors other than TBB. If we find exactly one, it
+ // is the fallthrough MBB. If we find zero, then TBB is both the target MBB
+ // and fallthrough MBB. If we find more than one, we cannot identify the
+ // fallthrough MBB and should return nullptr.
+ MachineBasicBlock *FallthroughBB = nullptr;
+ for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) {
+ if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB))
+ continue;
+ // Return a nullptr if we found more than one fallthrough successor.
+ if (FallthroughBB && FallthroughBB != TBB)
+ return nullptr;
+ FallthroughBB = *SI;
+ }
+ return FallthroughBB;
+}
+
bool X86InstrInfo::AnalyzeBranchImpl(
MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -3914,7 +4068,7 @@ bool X86InstrInfo::AnalyzeBranchImpl(
// Working from the bottom, when we see a non-terminator instruction, we're
// done.
- if (!isUnpredicatedTerminator(I))
+ if (!isUnpredicatedTerminator(*I))
break;
// A terminator that isn't a branch can't easily be handled by this
@@ -4000,7 +4154,7 @@ bool X86InstrInfo::AnalyzeBranchImpl(
FBB = TBB;
TBB = I->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(BranchCode));
- CondBranches.push_back(I);
+ CondBranches.push_back(&*I);
continue;
}
@@ -4010,41 +4164,56 @@ bool X86InstrInfo::AnalyzeBranchImpl(
assert(Cond.size() == 1);
assert(TBB);
- // Only handle the case where all conditional branches branch to the same
- // destination.
- if (TBB != I->getOperand(0).getMBB())
- return true;
-
// If the conditions are the same, we can leave them alone.
X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
- if (OldBranchCode == BranchCode)
+ auto NewTBB = I->getOperand(0).getMBB();
+ if (OldBranchCode == BranchCode && TBB == NewTBB)
continue;
// If they differ, see if they fit one of the known patterns. Theoretically,
// we could handle more patterns here, but we shouldn't expect to see them
// if instruction selection has done a reasonable job.
- if ((OldBranchCode == X86::COND_NP &&
- BranchCode == X86::COND_E) ||
- (OldBranchCode == X86::COND_E &&
- BranchCode == X86::COND_NP))
- BranchCode = X86::COND_NP_OR_E;
- else if ((OldBranchCode == X86::COND_P &&
- BranchCode == X86::COND_NE) ||
- (OldBranchCode == X86::COND_NE &&
- BranchCode == X86::COND_P))
+ if (TBB == NewTBB &&
+ ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
+ (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
BranchCode = X86::COND_NE_OR_P;
- else
+ } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
+ (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
+ if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
+ return true;
+
+ // X86::COND_E_AND_NP usually has two different branch destinations.
+ //
+ // JP B1
+ // JE B2
+ // JMP B1
+ // B1:
+ // B2:
+ //
+ // Here this condition branches to B2 only if NP && E. It has another
+ // equivalent form:
+ //
+ // JNE B1
+ // JNP B2
+ // JMP B1
+ // B1:
+ // B2:
+ //
+ // Similarly, it branches to B2 only if E && NP. That is why this condition
+ // is named COND_E_AND_NP.
+ BranchCode = X86::COND_E_AND_NP;
+ } else
return true;
// Update the MachineOperand.
Cond[0].setImm(BranchCode);
- CondBranches.push_back(I);
+ CondBranches.push_back(&*I);
}
return false;
}
-bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -4053,7 +4222,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
}
-bool X86InstrInfo::AnalyzeBranchPredicate(MachineBasicBlock &MBB,
+bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
MachineBranchPredicate &MBP,
bool AllowModify) const {
using namespace std::placeholders;
@@ -4142,10 +4311,11 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return Count;
}
-unsigned
-X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+unsigned X86InstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
@@ -4158,17 +4328,13 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
return 1;
}
+ // If FBB is null, it is implied to be a fall-through block.
+ bool FallThru = FBB == nullptr;
+
// Conditional branch.
unsigned Count = 0;
X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
switch (CC) {
- case X86::COND_NP_OR_E:
- // Synthesize NP_OR_E with two branches.
- BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB);
- ++Count;
- BuildMI(&MBB, DL, get(X86::JE_1)).addMBB(TBB);
- ++Count;
- break;
case X86::COND_NE_OR_P:
// Synthesize NE_OR_P with two branches.
BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(TBB);
@@ -4176,13 +4342,26 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
BuildMI(&MBB, DL, get(X86::JP_1)).addMBB(TBB);
++Count;
break;
+ case X86::COND_E_AND_NP:
+ // Use the next block of MBB as FBB if it is null.
+ if (FBB == nullptr) {
+ FBB = getFallThroughMBB(&MBB, TBB);
+ assert(FBB && "MBB cannot be the last block in function when the false "
+ "body is a fall-through.");
+ }
+ // Synthesize COND_E_AND_NP with two branches.
+ BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(FBB);
+ ++Count;
+ BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB);
+ ++Count;
+ break;
default: {
unsigned Opc = GetCondBranchFromCond(CC);
BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
++Count;
}
}
- if (FBB) {
+ if (!FallThru) {
// Two-way Conditional branch. Insert the second branch.
BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
++Count;
@@ -4228,15 +4407,16 @@ canInsertSelect(const MachineBasicBlock &MBB,
}
void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DstReg, ArrayRef<MachineOperand> Cond,
- unsigned TrueReg, unsigned FalseReg) const {
- MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
- assert(Cond.size() == 1 && "Invalid Cond array");
- unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
- MRI.getRegClass(DstReg)->getSize(),
- false/*HasMemoryOperand*/);
- BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DstReg,
+ ArrayRef<MachineOperand> Cond, unsigned TrueReg,
+ unsigned FalseReg) const {
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ assert(Cond.size() == 1 && "Invalid Cond array");
+ unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
+ MRI.getRegClass(DstReg)->getSize(),
+ false /*HasMemoryOperand*/);
+ BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
}
/// Test if the given register is a physical h register.
@@ -4258,16 +4438,18 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
if (X86::GR64RegClass.contains(DestReg)) {
if (X86::VR128XRegClass.contains(SrcReg))
// Copy from a VR128 register to a GR64 register.
- return HasAVX512 ? X86::VMOVPQIto64Zrr: (HasAVX ? X86::VMOVPQIto64rr :
- X86::MOVPQIto64rr);
+ return HasAVX512 ? X86::VMOVPQIto64Zrr :
+ HasAVX ? X86::VMOVPQIto64rr :
+ X86::MOVPQIto64rr;
if (X86::VR64RegClass.contains(SrcReg))
// Copy from a VR64 register to a GR64 register.
return X86::MMX_MOVD64from64rr;
} else if (X86::GR64RegClass.contains(SrcReg)) {
// Copy from a GR64 register to a VR128 register.
if (X86::VR128XRegClass.contains(DestReg))
- return HasAVX512 ? X86::VMOV64toPQIZrr: (HasAVX ? X86::VMOV64toPQIrr :
- X86::MOV64toPQIrr);
+ return HasAVX512 ? X86::VMOV64toPQIZrr :
+ HasAVX ? X86::VMOV64toPQIrr :
+ X86::MOV64toPQIrr;
// Copy from a GR64 register to a VR64 register.
if (X86::VR64RegClass.contains(DestReg))
return X86::MMX_MOVD64to64rr;
@@ -4276,22 +4458,30 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
// SrcReg(FR32) -> DestReg(GR32)
// SrcReg(GR32) -> DestReg(FR32)
- if (X86::GR32RegClass.contains(DestReg) && X86::FR32XRegClass.contains(SrcReg))
+ if (X86::GR32RegClass.contains(DestReg) &&
+ X86::FR32XRegClass.contains(SrcReg))
// Copy from a FR32 register to a GR32 register.
- return HasAVX512 ? X86::VMOVSS2DIZrr : (HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr);
+ return HasAVX512 ? X86::VMOVSS2DIZrr :
+ HasAVX ? X86::VMOVSS2DIrr :
+ X86::MOVSS2DIrr;
- if (X86::FR32XRegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg))
+ if (X86::FR32XRegClass.contains(DestReg) &&
+ X86::GR32RegClass.contains(SrcReg))
// Copy from a GR32 register to a FR32 register.
- return HasAVX512 ? X86::VMOVDI2SSZrr : (HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr);
+ return HasAVX512 ? X86::VMOVDI2SSZrr :
+ HasAVX ? X86::VMOVDI2SSrr :
+ X86::MOVDI2SSrr;
return 0;
}
+static bool isMaskRegClass(const TargetRegisterClass *RC) {
+ // All KMASK RegClasses hold the same k registers, so RC can be tested against any of them.
+ return X86::VK16RegClass.hasSubClassEq(RC);
+}
+
static bool MaskRegClassContains(unsigned Reg) {
- return X86::VK8RegClass.contains(Reg) ||
- X86::VK16RegClass.contains(Reg) ||
- X86::VK32RegClass.contains(Reg) ||
- X86::VK64RegClass.contains(Reg) ||
- X86::VK1RegClass.contains(Reg);
+ // All KMASK RegClasses hold the same k registers, so Reg can be tested against any of them.
+ return X86::VK16RegClass.contains(Reg);
}
static bool GRRegClassContains(unsigned Reg) {
@@ -4338,13 +4528,22 @@ unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg,
if (Subtarget.hasBWI())
if (auto Opc = copyPhysRegOpcode_AVX512_BW(DestReg, SrcReg))
return Opc;
- if (X86::VR128XRegClass.contains(DestReg, SrcReg) ||
- X86::VR256XRegClass.contains(DestReg, SrcReg) ||
- X86::VR512RegClass.contains(DestReg, SrcReg)) {
- DestReg = get512BitSuperRegister(DestReg);
- SrcReg = get512BitSuperRegister(SrcReg);
+ if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
+ if (Subtarget.hasVLX())
+ return X86::VMOVAPSZ128rr;
+ DestReg = get512BitSuperRegister(DestReg);
+ SrcReg = get512BitSuperRegister(SrcReg);
+ return X86::VMOVAPSZrr;
+ }
+ if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
+ if (Subtarget.hasVLX())
+ return X86::VMOVAPSZ256rr;
+ DestReg = get512BitSuperRegister(DestReg);
+ SrcReg = get512BitSuperRegister(SrcReg);
+ return X86::VMOVAPSZrr;
+ }
+ if (X86::VR512RegClass.contains(DestReg, SrcReg))
return X86::VMOVAPSZrr;
- }
if (MaskRegClassContains(DestReg) && MaskRegClassContains(SrcReg))
return X86::KMOVWkk;
if (MaskRegClassContains(DestReg) && GRRegClassContains(SrcReg)) {
@@ -4359,9 +4558,9 @@ unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg,
}
void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
// First deal with the normal symmetric copies.
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
@@ -4455,22 +4654,33 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// first frame index.
// See X86ISelLowering.cpp - X86::hasCopyImplyingStackAdjustment.
-
- bool AXDead = (Reg == AX) ||
- (MachineBasicBlock::LQR_Dead ==
- MBB.computeRegisterLiveness(&getRegisterInfo(), AX, MI));
- if (!AXDead) {
- // FIXME: If computeRegisterLiveness() reported LQR_Unknown then AX may
- // actually be dead. This is not a problem for correctness as we are just
- // (unnecessarily) saving+restoring a dead register. However the
- // MachineVerifier expects operands that read from dead registers
- // to be marked with the "undef" flag.
- // An example of this can be found in
- // test/CodeGen/X86/peephole-na-phys-copy-folding.ll and
- // test/CodeGen/X86/cmpxchg-clobber-flags.ll when using
- // -verify-machineinstrs.
- BuildMI(MBB, MI, DL, get(Push)).addReg(AX, getKillRegState(true));
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ MachineBasicBlock::LivenessQueryResult LQR =
+ MBB.computeRegisterLiveness(TRI, AX, MI);
+ // We do not want to save and restore AX if we do not have to.
+ // Moreover, if we do so while AX is dead, we would need to set
+ // an undef flag on the use of AX; otherwise the verifier will
+ // complain that we read an undef value.
+ // We do not want to change the behavior of the machine verifier,
+ // as it is usually wrong to read an undef value.
+ if (MachineBasicBlock::LQR_Unknown == LQR) {
+ LivePhysRegs LPR(TRI);
+ LPR.addLiveOuts(MBB);
+ MachineBasicBlock::iterator I = MBB.end();
+ while (I != MI) {
+ --I;
+ LPR.stepBackward(*I);
+ }
+ // AX contains the topmost register in the aliasing hierarchy.
+ // It may not be live, but one of its aliases may be.
+ for (MCRegAliasIterator AI(AX, TRI, true);
+ AI.isValid() && LQR != MachineBasicBlock::LQR_Live; ++AI)
+ LQR = LPR.contains(*AI) ? MachineBasicBlock::LQR_Live
+ : MachineBasicBlock::LQR_Dead;
}
+ bool AXDead = (Reg == AX) || (MachineBasicBlock::LQR_Dead == LQR);
+ if (!AXDead)
+ BuildMI(MBB, MI, DL, get(Push)).addReg(AX, getKillRegState(true));
if (FromEFLAGS) {
BuildMI(MBB, MI, DL, get(X86::SETOr), X86::AL);
BuildMI(MBB, MI, DL, get(X86::LAHF));
@@ -4493,15 +4703,28 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
llvm_unreachable("Cannot emit physreg copy instruction");
}
+static unsigned getLoadStoreMaskRegOpcode(const TargetRegisterClass *RC,
+ bool load) {
+ switch (RC->getSize()) {
+ default:
+ llvm_unreachable("Unknown spill size");
+ case 2:
+ return load ? X86::KMOVWkm : X86::KMOVWmk;
+ case 4:
+ return load ? X86::KMOVDkm : X86::KMOVDmk;
+ case 8:
+ return load ? X86::KMOVQkm : X86::KMOVQmk;
+ }
+}
+
static unsigned getLoadStoreRegOpcode(unsigned Reg,
const TargetRegisterClass *RC,
bool isStackAligned,
const X86Subtarget &STI,
bool load) {
if (STI.hasAVX512()) {
- if (X86::VK8RegClass.hasSubClassEq(RC) ||
- X86::VK16RegClass.hasSubClassEq(RC))
- return load ? X86::KMOVWkm : X86::KMOVWmk;
+ if (isMaskRegClass(RC))
+ return getLoadStoreMaskRegOpcode(RC, load);
if (RC->getSize() == 4 && X86::FR32XRegClass.hasSubClassEq(RC))
return load ? X86::VMOVSSZrm : X86::VMOVSSZmr;
if (RC->getSize() == 8 && X86::FR64XRegClass.hasSubClassEq(RC))
@@ -4554,25 +4777,38 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
assert((X86::VR128RegClass.hasSubClassEq(RC) ||
X86::VR128XRegClass.hasSubClassEq(RC))&& "Unknown 16-byte regclass");
// If stack is realigned we can use aligned stores.
+ if (X86::VR128RegClass.hasSubClassEq(RC)) {
+ if (isStackAligned)
+ return load ? (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
+ : (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
+ else
+ return load ? (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
+ : (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
+ }
+ assert(STI.hasVLX() && "Using extended register requires VLX");
if (isStackAligned)
- return load ?
- (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm) :
- (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
+ return load ? X86::VMOVAPSZ128rm : X86::VMOVAPSZ128mr;
else
- return load ?
- (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm) :
- (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
+ return load ? X86::VMOVUPSZ128rm : X86::VMOVUPSZ128mr;
}
case 32:
assert((X86::VR256RegClass.hasSubClassEq(RC) ||
X86::VR256XRegClass.hasSubClassEq(RC)) && "Unknown 32-byte regclass");
// If stack is realigned we can use aligned stores.
+ if (X86::VR256RegClass.hasSubClassEq(RC)) {
+ if (isStackAligned)
+ return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
+ else
+ return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
+ }
+ assert(STI.hasVLX() && "Using extended register requires VLX");
if (isStackAligned)
- return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
+ return load ? X86::VMOVAPSZ256rm : X86::VMOVAPSZ256mr;
else
- return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
+ return load ? X86::VMOVUPSZ256rm : X86::VMOVUPSZ256mr;
case 64:
assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
+ assert(STI.hasVLX() && "Using 512-bit register requires AVX512");
if (isStackAligned)
return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
else
@@ -4580,25 +4816,29 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
}
}
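
The aligned/unaligned split in the spill-opcode selection above exists because the aligned vector moves (MOVAPS and friends) fault on a misaligned address, while the unaligned forms only risk being slower. The underlying test is the usual power-of-two alignment check, sketched here:

#include <cstdint>

// True if a spill slot at SlotAddr satisfies the vector's natural alignment.
inline bool slotIsAlignedFor(std::uintptr_t SlotAddr, unsigned VecBytes) {
  return (SlotAddr & (std::uintptr_t)(VecBytes - 1)) == 0; // VecBytes: 16/32/64
}
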
-bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
- unsigned &Offset,
+bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
+ int64_t &Offset,
const TargetRegisterInfo *TRI) const {
- const MCInstrDesc &Desc = MemOp->getDesc();
- int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags, MemOp->getOpcode());
+ const MCInstrDesc &Desc = MemOp.getDesc();
+ int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
if (MemRefBegin < 0)
return false;
MemRefBegin += X86II::getOperandBias(Desc);
- BaseReg = MemOp->getOperand(MemRefBegin + X86::AddrBaseReg).getReg();
- if (MemOp->getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
+ MachineOperand &BaseMO = MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
+ if (!BaseMO.isReg()) // Can be an MO_FrameIndex
+ return false;
+
+ BaseReg = BaseMO.getReg();
+ if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
return false;
- if (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
+ if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
X86::NoRegister)
return false;
- const MachineOperand &DispMO = MemOp->getOperand(MemRefBegin + X86::AddrDisp);
+ const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
// Displacement can be symbolic
if (!DispMO.isImm())
@@ -4606,8 +4846,8 @@ bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
Offset = DispMO.getImm();
- return (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() ==
- X86::NoRegister);
+ return MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() ==
+ X86::NoRegister;
}
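
To make the checks above concrete: an x86 memory reference decomposes as Base + Scale*Index + Disp, and the hook only reports a (BaseReg, Offset) pair for the simple scale-1, no-index, immediate-displacement form. A small sketch with a hypothetical MemAddr type:

#include <cstdint>
#include <optional>
#include <utility>

// Simplified model of the address forms getMemOpBaseRegImmOfs accepts.
struct MemAddr {
  unsigned BaseReg = 0;
  unsigned IndexReg = 0; // 0 means "no index register"
  int Scale = 1;
  std::int64_t Disp = 0;
};

std::optional<std::pair<unsigned, std::int64_t>> baseRegImmOfs(const MemAddr &M) {
  if (M.Scale != 1 || M.IndexReg != 0)
    return std::nullopt; // only plain [Base + Disp] is reported
  return std::make_pair(M.BaseReg, M.Disp);
}
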
static unsigned getStoreRegOpcode(unsigned SrcReg,
@@ -4697,10 +4937,10 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
NewMIs.push_back(MIB);
}
-bool X86InstrInfo::
-analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
- int &CmpMask, int &CmpValue) const {
- switch (MI->getOpcode()) {
+bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const {
+ switch (MI.getOpcode()) {
default: break;
case X86::CMP64ri32:
case X86::CMP64ri8:
@@ -4709,17 +4949,17 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
case X86::CMP16ri:
case X86::CMP16ri8:
case X86::CMP8ri:
- SrcReg = MI->getOperand(0).getReg();
+ SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
CmpMask = ~0;
- CmpValue = MI->getOperand(1).getImm();
+ CmpValue = MI.getOperand(1).getImm();
return true;
// A SUB can be used to perform comparison.
case X86::SUB64rm:
case X86::SUB32rm:
case X86::SUB16rm:
case X86::SUB8rm:
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
CmpValue = 0;
@@ -4728,8 +4968,8 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
case X86::SUB32rr:
case X86::SUB16rr:
case X86::SUB8rr:
- SrcReg = MI->getOperand(1).getReg();
- SrcReg2 = MI->getOperand(2).getReg();
+ SrcReg = MI.getOperand(1).getReg();
+ SrcReg2 = MI.getOperand(2).getReg();
CmpMask = ~0;
CmpValue = 0;
return true;
@@ -4740,17 +4980,17 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
case X86::SUB16ri:
case X86::SUB16ri8:
case X86::SUB8ri:
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
- CmpValue = MI->getOperand(2).getImm();
+ CmpValue = MI.getOperand(2).getImm();
return true;
case X86::CMP64rr:
case X86::CMP32rr:
case X86::CMP16rr:
case X86::CMP8rr:
- SrcReg = MI->getOperand(0).getReg();
- SrcReg2 = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(0).getReg();
+ SrcReg2 = MI.getOperand(1).getReg();
CmpMask = ~0;
CmpValue = 0;
return true;
@@ -4758,8 +4998,9 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
case X86::TEST16rr:
case X86::TEST32rr:
case X86::TEST64rr:
- SrcReg = MI->getOperand(0).getReg();
- if (MI->getOperand(1).getReg() != SrcReg) return false;
+ SrcReg = MI.getOperand(0).getReg();
+ if (MI.getOperand(1).getReg() != SrcReg)
+ return false;
// Compare against zero.
SrcReg2 = 0;
CmpMask = ~0;
@@ -4775,47 +5016,40 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
/// This function can be extended later on.
/// SrcReg, SrcReg2: register operands for FlagI.
/// ImmValue: immediate for FlagI if it takes an immediate.
-inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg,
+inline static bool isRedundantFlagInstr(MachineInstr &FlagI, unsigned SrcReg,
unsigned SrcReg2, int ImmValue,
- MachineInstr *OI) {
- if (((FlagI->getOpcode() == X86::CMP64rr &&
- OI->getOpcode() == X86::SUB64rr) ||
- (FlagI->getOpcode() == X86::CMP32rr &&
- OI->getOpcode() == X86::SUB32rr)||
- (FlagI->getOpcode() == X86::CMP16rr &&
- OI->getOpcode() == X86::SUB16rr)||
- (FlagI->getOpcode() == X86::CMP8rr &&
- OI->getOpcode() == X86::SUB8rr)) &&
- ((OI->getOperand(1).getReg() == SrcReg &&
- OI->getOperand(2).getReg() == SrcReg2) ||
- (OI->getOperand(1).getReg() == SrcReg2 &&
- OI->getOperand(2).getReg() == SrcReg)))
+ MachineInstr &OI) {
+ if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
+ (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
+ (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
+ (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
+ ((OI.getOperand(1).getReg() == SrcReg &&
+ OI.getOperand(2).getReg() == SrcReg2) ||
+ (OI.getOperand(1).getReg() == SrcReg2 &&
+ OI.getOperand(2).getReg() == SrcReg)))
return true;
- if (((FlagI->getOpcode() == X86::CMP64ri32 &&
- OI->getOpcode() == X86::SUB64ri32) ||
- (FlagI->getOpcode() == X86::CMP64ri8 &&
- OI->getOpcode() == X86::SUB64ri8) ||
- (FlagI->getOpcode() == X86::CMP32ri &&
- OI->getOpcode() == X86::SUB32ri) ||
- (FlagI->getOpcode() == X86::CMP32ri8 &&
- OI->getOpcode() == X86::SUB32ri8) ||
- (FlagI->getOpcode() == X86::CMP16ri &&
- OI->getOpcode() == X86::SUB16ri) ||
- (FlagI->getOpcode() == X86::CMP16ri8 &&
- OI->getOpcode() == X86::SUB16ri8) ||
- (FlagI->getOpcode() == X86::CMP8ri &&
- OI->getOpcode() == X86::SUB8ri)) &&
- OI->getOperand(1).getReg() == SrcReg &&
- OI->getOperand(2).getImm() == ImmValue)
+ if (((FlagI.getOpcode() == X86::CMP64ri32 &&
+ OI.getOpcode() == X86::SUB64ri32) ||
+ (FlagI.getOpcode() == X86::CMP64ri8 &&
+ OI.getOpcode() == X86::SUB64ri8) ||
+ (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
+ (FlagI.getOpcode() == X86::CMP32ri8 &&
+ OI.getOpcode() == X86::SUB32ri8) ||
+ (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
+ (FlagI.getOpcode() == X86::CMP16ri8 &&
+ OI.getOpcode() == X86::SUB16ri8) ||
+ (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
+ OI.getOperand(1).getReg() == SrcReg &&
+ OI.getOperand(2).getImm() == ImmValue)
return true;
return false;
}
/// Check whether the definition can be converted
/// to remove a comparison against zero.
-inline static bool isDefConvertible(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+inline static bool isDefConvertible(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
default: return false;
// The shift instructions only modify ZF if their shift count is non-zero.
@@ -4899,8 +5133,8 @@ inline static bool isDefConvertible(MachineInstr *MI) {
}
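
For intuition about what makes a def "convertible" here: instructions such as AND, SUB and the shifts already set ZF and SF from their result, so a later compare or test against zero on the same register recomputes flags that are already correct. A minimal standalone check of that equivalence:

#include <cassert>
#include <cstdint>

// ZF after "and a, b" equals ZF after a redundant "test r, r" of the result.
bool zfAfterAnd(std::uint32_t A, std::uint32_t B) { return (A & B) == 0; }
bool zfAfterTestSelf(std::uint32_t V) { return (V & V) == 0; }

int main() {
  for (std::uint32_t A : {0u, 1u, 0xF0u})
    for (std::uint32_t B : {0u, 0x0Fu, 0xF0u})
      assert(zfAfterAnd(A, B) == zfAfterTestSelf(A & B));
}
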
/// Check whether the use can be converted to remove a comparison against zero.
-static X86::CondCode isUseDefConvertible(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static X86::CondCode isUseDefConvertible(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
default: return X86::COND_INVALID;
case X86::LZCNT16rr: case X86::LZCNT16rm:
case X86::LZCNT32rr: case X86::LZCNT32rm:
@@ -4920,13 +5154,13 @@ static X86::CondCode isUseDefConvertible(MachineInstr *MI) {
/// Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
-bool X86InstrInfo::
-optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
- int CmpMask, int CmpValue,
- const MachineRegisterInfo *MRI) const {
+bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int CmpMask,
+ int CmpValue,
+ const MachineRegisterInfo *MRI) const {
// Check whether we can replace SUB with CMP.
unsigned NewOpcode = 0;
- switch (CmpInstr->getOpcode()) {
+ switch (CmpInstr.getOpcode()) {
default: break;
case X86::SUB64ri32:
case X86::SUB64ri8:
@@ -4943,10 +5177,10 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
case X86::SUB32rr:
case X86::SUB16rr:
case X86::SUB8rr: {
- if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
+ if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
return false;
// There is no use of the destination register; we can replace SUB with CMP.
- switch (CmpInstr->getOpcode()) {
+ switch (CmpInstr.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
@@ -4964,8 +5198,8 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
}
- CmpInstr->setDesc(get(NewOpcode));
- CmpInstr->RemoveOperand(0);
+ CmpInstr.setDesc(get(NewOpcode));
+ CmpInstr.RemoveOperand(0);
// Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
@@ -4983,7 +5217,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// If we are comparing against zero, check whether we can use MI to update
// EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
bool IsCmpZero = (SrcReg2 == 0 && CmpValue == 0);
- if (IsCmpZero && MI->getParent() != CmpInstr->getParent())
+ if (IsCmpZero && MI->getParent() != CmpInstr.getParent())
return false;
// If we have a use of the source register between the def and our compare
@@ -4991,19 +5225,20 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// right way.
bool ShouldUpdateCC = false;
X86::CondCode NewCC = X86::COND_INVALID;
- if (IsCmpZero && !isDefConvertible(MI)) {
+ if (IsCmpZero && !isDefConvertible(*MI)) {
// Scan forward from the use until we hit the use we're looking for or the
// compare instruction.
for (MachineBasicBlock::iterator J = MI;; ++J) {
// Do we have a convertible instruction?
- NewCC = isUseDefConvertible(J);
+ NewCC = isUseDefConvertible(*J);
if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
J->getOperand(1).getReg() == SrcReg) {
assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
ShouldUpdateCC = true; // Update CC later on.
// This is not a def of SrcReg, but still a def of EFLAGS. Keep going
// with the new def.
- MI = Def = J;
+ Def = J;
+ MI = &*Def;
break;
}
@@ -5024,29 +5259,29 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// otherwise, RE is the rend of the basic block.
MachineBasicBlock::reverse_iterator
RI = MachineBasicBlock::reverse_iterator(I),
- RE = CmpInstr->getParent() == MI->getParent() ?
- MachineBasicBlock::reverse_iterator(++Def) /* points to MI */ :
- CmpInstr->getParent()->rend();
+ RE = CmpInstr.getParent() == MI->getParent()
+ ? MachineBasicBlock::reverse_iterator(++Def) /* points to MI */
+ : CmpInstr.getParent()->rend();
MachineInstr *Movr0Inst = nullptr;
for (; RI != RE; ++RI) {
- MachineInstr *Instr = &*RI;
+ MachineInstr &Instr = *RI;
// Check whether CmpInstr can be made redundant by the current instruction.
if (!IsCmpZero &&
isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, Instr)) {
- Sub = Instr;
+ Sub = &Instr;
break;
}
- if (Instr->modifiesRegister(X86::EFLAGS, TRI) ||
- Instr->readsRegister(X86::EFLAGS, TRI)) {
+ if (Instr.modifiesRegister(X86::EFLAGS, TRI) ||
+ Instr.readsRegister(X86::EFLAGS, TRI)) {
// This instruction modifies or uses EFLAGS.
// MOV32r0 etc. are implemented with xor, which clobbers the condition codes.
// They are safe to move up if their definition of EFLAGS is dead and
// earlier instructions do not read or write EFLAGS.
- if (!Movr0Inst && Instr->getOpcode() == X86::MOV32r0 &&
- Instr->registerDefIsDead(X86::EFLAGS, TRI)) {
- Movr0Inst = Instr;
+ if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 &&
+ Instr.registerDefIsDead(X86::EFLAGS, TRI)) {
+ Movr0Inst = &Instr;
continue;
}
@@ -5068,7 +5303,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// live-out.
bool IsSafe = false;
SmallVector<std::pair<MachineInstr*, unsigned /*NewOpc*/>, 4> OpsToUpdate;
- MachineBasicBlock::iterator E = CmpInstr->getParent()->end();
+ MachineBasicBlock::iterator E = CmpInstr.getParent()->end();
for (++I; I != E; ++I) {
const MachineInstr &Instr = *I;
bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
@@ -5159,7 +5394,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// If EFLAGS is not killed nor re-defined, we should check whether it is
// live-out. If it is live-out, do not optimize.
if ((IsCmpZero || IsSwapped) && !IsSafe) {
- MachineBasicBlock *MBB = CmpInstr->getParent();
+ MachineBasicBlock *MBB = CmpInstr.getParent();
for (MachineBasicBlock *Successor : MBB->successors())
if (Successor->isLiveIn(X86::EFLAGS))
return false;
@@ -5199,7 +5434,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
}
assert(i != e && "Unable to locate a def EFLAGS operand");
- CmpInstr->eraseFromParent();
+ CmpInstr.eraseFromParent();
// Modify the condition code of instructions in OpsToUpdate.
for (auto &Op : OpsToUpdate)
@@ -5211,14 +5446,14 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
/// operand at the use. We fold the load instructions if the load defines a
/// virtual register, the virtual register is used once in the same BB, and the
/// instructions in between do not load or store and have no side effects.
-MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
+MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
if (FoldAsLoadDefReg == 0)
return nullptr;
// To be conservative, if there exists another load, clear the load candidate.
- if (MI->mayLoad()) {
+ if (MI.mayLoad()) {
FoldAsLoadDefReg = 0;
return nullptr;
}
@@ -5233,8 +5468,8 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
// Collect information about virtual register operands of MI.
unsigned SrcOperandId = 0;
bool FoundSrcOperand = false;
- for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
@@ -5251,7 +5486,7 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
return nullptr;
// Check whether we can fold the def into SrcOperandId.
- if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandId, DefMI)) {
+ if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandId, *DefMI)) {
FoldAsLoadDefReg = 0;
return FoldMI;
}
@@ -5313,6 +5548,60 @@ static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
return true;
}
+bool X86InstrInfo::ExpandMOVImmSExti8(MachineInstrBuilder &MIB) const {
+ MachineBasicBlock &MBB = *MIB->getParent();
+ DebugLoc DL = MIB->getDebugLoc();
+ int64_t Imm = MIB->getOperand(1).getImm();
+ assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
+ MachineBasicBlock::iterator I = MIB.getInstr();
+
+ int StackAdjustment;
+
+ if (Subtarget.is64Bit()) {
+ assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
+ MIB->getOpcode() == X86::MOV32ImmSExti8);
+
+ // Can't use push/pop lowering if the function might write to the red zone.
+ X86MachineFunctionInfo *X86FI =
+ MBB.getParent()->getInfo<X86MachineFunctionInfo>();
+ if (X86FI->getUsesRedZone()) {
+ MIB->setDesc(get(MIB->getOpcode() == X86::MOV32ImmSExti8 ? X86::MOV32ri
+ : X86::MOV64ri));
+ return true;
+ }
+
+ // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
+ // widen the register if necessary.
+ StackAdjustment = 8;
+ BuildMI(MBB, I, DL, get(X86::PUSH64i8)).addImm(Imm);
+ MIB->setDesc(get(X86::POP64r));
+ MIB->getOperand(0)
+ .setReg(getX86SubSuperRegister(MIB->getOperand(0).getReg(), 64));
+ } else {
+ assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
+ StackAdjustment = 4;
+ BuildMI(MBB, I, DL, get(X86::PUSH32i8)).addImm(Imm);
+ MIB->setDesc(get(X86::POP32r));
+ }
+
+ // Build CFI if necessary.
+ MachineFunction &MF = *MBB.getParent();
+ const X86FrameLowering *TFL = Subtarget.getFrameLowering();
+ bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ bool NeedsDwarfCFI =
+ !IsWin64Prologue &&
+ (MF.getMMI().hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry());
+ bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
+ if (EmitCFI) {
+ TFL->BuildCFI(MBB, I, DL,
+ MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
+ TFL->BuildCFI(MBB, std::next(I), DL,
+ MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
+ }
+
+ return true;
+}
+
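
The expansion above trades a plain move-immediate for a push/pop pair when the immediate fits in a sign-extended 8-bit field; the win is purely code size. A back-of-the-envelope sketch of the byte counts involved (assumed encodings, ignoring the REX prefixes needed for extended registers):

// "push imm8" is 2 bytes and "pop reg" is 1, versus 5 bytes for
// "mov r32, imm32" and 7 bytes for the 64-bit sign-extending form.
constexpr unsigned pushPopBytes() { return 2 /*push imm8*/ + 1 /*pop reg*/; }
constexpr unsigned movImmBytes(bool Is64Bit) { return Is64Bit ? 7 : 5; }

static_assert(pushPopBytes() < movImmBytes(false), "push/pop saves bytes");
static_assert(pushPopBytes() < movImmBytes(true), "push/pop saves bytes");
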
// LoadStackGuard has so far only been implemented for 64-bit MachO. Different
// code sequence is needed for other targets.
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
@@ -5322,9 +5611,9 @@ static void expandLoadStackGuard(MachineInstrBuilder &MIB,
unsigned Reg = MIB->getOperand(0).getReg();
const GlobalValue *GV =
cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
- unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
+ auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
- MachinePointerInfo::getGOT(*MBB.getParent()), Flag, 8, 8);
+ MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, 8);
MachineBasicBlock::iterator I = MIB.getInstr();
BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
@@ -5335,16 +5624,19 @@ static void expandLoadStackGuard(MachineInstrBuilder &MIB,
MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
}
-bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
bool HasAVX = Subtarget.hasAVX();
- MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
- switch (MI->getOpcode()) {
+ MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
+ switch (MI.getOpcode()) {
case X86::MOV32r0:
return Expand2AddrUndef(MIB, get(X86::XOR32rr));
case X86::MOV32r1:
return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
case X86::MOV32r_1:
return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
+ case X86::MOV32ImmSExti8:
+ case X86::MOV64ImmSExti8:
+ return ExpandMOVImmSExti8(MIB);
case X86::SETB_C8r:
return Expand2AddrUndef(MIB, get(X86::SBB8rr));
case X86::SETB_C16r:
@@ -5360,17 +5652,30 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
case X86::AVX_SET0:
assert(HasAVX && "AVX not supported");
return Expand2AddrUndef(MIB, get(X86::VXORPSYrr));
+ case X86::AVX512_128_SET0:
+ return Expand2AddrUndef(MIB, get(X86::VPXORDZ128rr));
+ case X86::AVX512_256_SET0:
+ return Expand2AddrUndef(MIB, get(X86::VPXORDZ256rr));
case X86::AVX512_512_SET0:
return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
case X86::V_SETALLONES:
return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
case X86::AVX2_SETALLONES:
return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
+ case X86::AVX512_512_SETALLONES: {
+ unsigned Reg = MIB->getOperand(0).getReg();
+ MIB->setDesc(get(X86::VPTERNLOGDZrri));
+ // VPTERNLOGD needs 3 register inputs and an immediate.
+ // 0xff will return 1s for any input.
+ MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef)
+ .addReg(Reg, RegState::Undef).addImm(0xff);
+ return true;
+ }
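
The AVX512_512_SETALLONES case above relies on the ternary-logic immediate: each result bit is looked up in the 8-entry truth table encoded by the immediate, so 0xff yields all-ones regardless of the (undef) inputs. A small bitwise model of that semantics:

#include <cstdint>

// For each bit position, the three source bits form a 3-bit index into the
// immediate's truth table; Imm == 0xFF therefore returns all-ones.
std::uint32_t ternlog32(std::uint32_t A, std::uint32_t B, std::uint32_t C,
                        std::uint8_t Imm) {
  std::uint32_t R = 0;
  for (int Bit = 0; Bit < 32; ++Bit) {
    unsigned Idx = (((A >> Bit) & 1u) << 2) | (((B >> Bit) & 1u) << 1) |
                   ((C >> Bit) & 1u);
    R |= ((std::uint32_t)((Imm >> Idx) & 1u)) << Bit;
  }
  return R;
}
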
case X86::TEST8ri_NOREX:
- MI->setDesc(get(X86::TEST8ri));
+ MI.setDesc(get(X86::TEST8ri));
return true;
case X86::MOV32ri64:
- MI->setDesc(get(X86::MOV32ri));
+ MI.setDesc(get(X86::MOV32ri));
return true;
// KNL does not recognize dependency-breaking idioms for mask registers,
@@ -5422,23 +5727,23 @@ static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
- MachineInstr *MI,
+ MachineInstr &MI,
const TargetInstrInfo &TII) {
// Create the base instruction with the memory operand as the first part.
// Omit the implicit operands, something BuildMI can't do.
- MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
- MI->getDebugLoc(), true);
+ MachineInstr *NewMI =
+ MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
addOperands(MIB, MOs);
// Loop over the rest of the ri operands, converting them over.
- unsigned NumOps = MI->getDesc().getNumOperands()-2;
+ unsigned NumOps = MI.getDesc().getNumOperands() - 2;
for (unsigned i = 0; i != NumOps; ++i) {
- MachineOperand &MO = MI->getOperand(i+2);
+ MachineOperand &MO = MI.getOperand(i + 2);
MIB.addOperand(MO);
}
- for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
MIB.addOperand(MO);
}
@@ -5451,15 +5756,15 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
unsigned OpNo, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
- MachineInstr *MI, const TargetInstrInfo &TII,
+ MachineInstr &MI, const TargetInstrInfo &TII,
int PtrOffset = 0) {
// Omit the implicit operands, something BuildMI can't do.
- MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
- MI->getDebugLoc(), true);
+ MachineInstr *NewMI =
+ MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
if (i == OpNo) {
assert(MO.isReg() && "Expected to fold into reg operand!");
addOperands(MIB, MOs, PtrOffset);
@@ -5477,35 +5782,35 @@ static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
- MachineInstr *MI) {
+ MachineInstr &MI) {
MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
- MI->getDebugLoc(), TII.get(Opcode));
+ MI.getDebugLoc(), TII.get(Opcode));
addOperands(MIB, MOs);
return MIB.addImm(0);
}
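
Folding a load with these helpers means replacing a single register operand with the full memory reference, which on x86 is always five machine operands (X86::AddrNumOperands == 5). A hypothetical struct spelling out that layout:

#include <cstdint>
#include <string>

// Plain-struct equivalent of the five folded address operands, in order.
struct X86AddressOps {
  std::string Base;      // AddrBaseReg
  int Scale = 1;         // AddrScaleAmt: 1, 2, 4 or 8
  std::string Index;     // AddrIndexReg, empty if none
  std::int64_t Disp = 0; // AddrDisp
  std::string Segment;   // AddrSegmentReg, usually empty
};
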
MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
- MachineFunction &MF, MachineInstr *MI, unsigned OpNum,
+ MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Align) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case X86::INSERTPSrr:
case X86::VINSERTPSrr:
// Attempt to convert the load of the inserted vector into a folded load
// of a single float.
if (OpNum == 2) {
- unsigned Imm = MI->getOperand(MI->getNumOperands() - 1).getImm();
+ unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
unsigned ZMask = Imm & 15;
unsigned DstIdx = (Imm >> 4) & 3;
unsigned SrcIdx = (Imm >> 6) & 3;
- unsigned RCSize = getRegClass(MI->getDesc(), OpNum, &RI, MF)->getSize();
+ unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
if (Size <= RCSize && 4 <= Align) {
int PtrOffset = SrcIdx * 4;
unsigned NewImm = (DstIdx << 4) | ZMask;
unsigned NewOpCode =
- (MI->getOpcode() == X86::VINSERTPSrr ? X86::VINSERTPSrm
- : X86::INSERTPSrm);
+ (MI.getOpcode() == X86::VINSERTPSrr ? X86::VINSERTPSrm
+ : X86::INSERTPSrm);
MachineInstr *NewMI =
FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
@@ -5513,17 +5818,34 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
}
}
break;
+ case X86::MOVHLPSrr:
+ case X86::VMOVHLPSrr:
+ // Move the upper 64 bits of the second operand to the lower 64 bits.
+ // To fold the load, adjust the pointer to the upper half and use (V)MOVLPS.
+ // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
+ if (OpNum == 2) {
+ unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
+ if (Size <= RCSize && 8 <= Align) {
+ unsigned NewOpCode =
+ (MI.getOpcode() == X86::VMOVHLPSrr ? X86::VMOVLPSrm
+ : X86::MOVLPSrm);
+ MachineInstr *NewMI =
+ FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
+ return NewMI;
+ }
+ }
+ break;
};
return nullptr;
}
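
The MOVHLPS case added above folds the vector load into a narrower load of just the half that is actually used; the PtrOffset of 8 selects the upper 64 bits. A standalone sketch of the equivalence (little-endian lane layout assumed):

#include <cstdint>
#include <cstring>

// MOVHLPS moves the upper 64 bits of its source into the lower 64 bits of
// the destination; if the source was just loaded, the same bits can be read
// directly with a 64-bit load at offset 8 (what (V)MOVLPS does).
std::uint64_t upperHalfViaFullLoad(const unsigned char *P) {
  unsigned char V[16];
  std::memcpy(V, P, 16);      // the original 128-bit load
  std::uint64_t Hi;
  std::memcpy(&Hi, V + 8, 8); // upper 64 bits of the loaded vector
  return Hi;
}

std::uint64_t upperHalfViaFoldedLoad(const unsigned char *P) {
  std::uint64_t Hi;
  std::memcpy(&Hi, P + 8, 8); // folded form: narrow load at P + 8
  return Hi;
}
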
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, unsigned OpNum,
+ MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Align, bool AllowCommute) const {
const DenseMap<unsigned,
- std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
+ std::pair<uint16_t, uint16_t> > *OpcodeTablePtr = nullptr;
bool isCallRegIndirect = Subtarget.callRegIndirect();
bool isTwoAddrFold = false;
@@ -5531,19 +5853,19 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
if (isCallRegIndirect && !MF.getFunction()->optForMinSize() &&
- (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r ||
- MI->getOpcode() == X86::PUSH16r || MI->getOpcode() == X86::PUSH32r ||
- MI->getOpcode() == X86::PUSH64r))
+ (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
+ MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
+ MI.getOpcode() == X86::PUSH64r))
return nullptr;
- unsigned NumOps = MI->getDesc().getNumOperands();
- bool isTwoAddr = NumOps > 1 &&
- MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
+ unsigned NumOps = MI.getDesc().getNumOperands();
+ bool isTwoAddr =
+ NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
// FIXME: AsmPrinter doesn't know how to handle
// X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
- if (MI->getOpcode() == X86::ADD32ri &&
- MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
+ if (MI.getOpcode() == X86::ADD32ri &&
+ MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
return nullptr;
MachineInstr *NewMI = nullptr;
@@ -5556,14 +5878,13 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Folding a memory location into the two-address part of a two-address
// instruction is different than folding it other places. It requires
// replacing the *two* registers with the memory location.
- if (isTwoAddr && NumOps >= 2 && OpNum < 2 &&
- MI->getOperand(0).isReg() &&
- MI->getOperand(1).isReg() &&
- MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
+ if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() &&
+ MI.getOperand(1).isReg() &&
+ MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
isTwoAddrFold = true;
} else if (OpNum == 0) {
- if (MI->getOpcode() == X86::MOV32r0) {
+ if (MI.getOpcode() == X86::MOV32r0) {
NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
if (NewMI)
return NewMI;
@@ -5583,8 +5904,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// If table selected...
if (OpcodeTablePtr) {
// Find the Opcode to fuse
- DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
- OpcodeTablePtr->find(MI->getOpcode());
+ auto I = OpcodeTablePtr->find(MI.getOpcode());
if (I != OpcodeTablePtr->end()) {
unsigned Opcode = I->second.first;
unsigned MinAlign = (I->second.second & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
@@ -5592,7 +5912,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
return nullptr;
bool NarrowToMOV32rm = false;
if (Size) {
- unsigned RCSize = getRegClass(MI->getDesc(), OpNum, &RI, MF)->getSize();
+ unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
if (Size < RCSize) {
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -5601,7 +5921,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// If this is a 64-bit load, but the spill slot is 32 bits, then we can do
// a 32-bit load, which is implicitly zero-extended. This is likely
// due to live interval analysis rematerializing a load from a stack slot.
- if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg())
+ if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
return nullptr;
Opcode = X86::MOV32rm;
NarrowToMOV32rm = true;
@@ -5632,14 +5952,14 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
if (AllowCommute) {
unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
- bool HasDef = MI->getDesc().getNumDefs();
- unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
- unsigned Reg1 = MI->getOperand(CommuteOpIdx1).getReg();
- unsigned Reg2 = MI->getOperand(CommuteOpIdx2).getReg();
+ bool HasDef = MI.getDesc().getNumDefs();
+ unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
+ unsigned Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
+ unsigned Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
bool Tied1 =
- 0 == MI->getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
+ 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
bool Tied2 =
- 0 == MI->getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
+ 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
// If either of the commutable operands is tied to the destination,
// then we cannot commute + fold.
@@ -5653,7 +5973,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Unable to commute.
return nullptr;
}
- if (CommutedMI != MI) {
+ if (CommutedMI != &MI) {
// New instruction. We can't fold from this.
CommutedMI->eraseFromParent();
return nullptr;
@@ -5672,7 +5992,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Unable to commute.
return nullptr;
}
- if (UncommutedMI != MI) {
+ if (UncommutedMI != &MI) {
// New instruction. It doesn't need to be kept.
UncommutedMI->eraseFromParent();
return nullptr;
@@ -5684,8 +6004,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
}
// No fusion
- if (PrintFailedFusing && !MI->isCopy())
- dbgs() << "We failed to fuse operand " << OpNum << " in " << *MI;
+ if (PrintFailedFusing && !MI.isCopy())
+ dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
return nullptr;
}
@@ -5723,6 +6043,10 @@ static bool hasPartialRegUpdate(unsigned Opcode) {
case X86::CVTSS2SDrm:
case X86::Int_CVTSS2SDrr:
case X86::Int_CVTSS2SDrm:
+ case X86::MOVHPDrm:
+ case X86::MOVHPSrm:
+ case X86::MOVLPDrm:
+ case X86::MOVLPSrm:
case X86::RCPSSr:
case X86::RCPSSm:
case X86::RCPSSr_Int:
@@ -5753,27 +6077,27 @@ static bool hasPartialRegUpdate(unsigned Opcode) {
/// Inform the ExeDepsFix pass how many idle
/// instructions we would like before a partial register update.
-unsigned X86InstrInfo::
-getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
- if (OpNum != 0 || !hasPartialRegUpdate(MI->getOpcode()))
+unsigned X86InstrInfo::getPartialRegUpdateClearance(
+ const MachineInstr &MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode()))
return 0;
// If MI is marked as reading Reg, the partial register update is wanted.
- const MachineOperand &MO = MI->getOperand(0);
+ const MachineOperand &MO = MI.getOperand(0);
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- if (MO.readsReg() || MI->readsVirtualRegister(Reg))
+ if (MO.readsReg() || MI.readsVirtualRegister(Reg))
return 0;
} else {
- if (MI->readsRegister(Reg, TRI))
+ if (MI.readsRegister(Reg, TRI))
return 0;
}
- // If any of the preceding 16 instructions are reading Reg, insert a
- // dependency breaking instruction. The magic number is based on a few
- // Nehalem experiments.
- return 16;
+ // If any instructions in the clearance range are reading Reg, insert a
+ // dependency-breaking instruction, which is inexpensive and is likely to
+ // be hidden in other instructions' cycles.
+ return PartialRegUpdateClearance;
}
// Return true for any instruction that copies the high bits of the first source
@@ -5847,59 +6171,61 @@ static bool hasUndefRegUpdate(unsigned Opcode) {
///
/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
/// high bits that are passed-through are not live.
-unsigned X86InstrInfo::
-getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
- const TargetRegisterInfo *TRI) const {
- if (!hasUndefRegUpdate(MI->getOpcode()))
+unsigned
+X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
+ const TargetRegisterInfo *TRI) const {
+ if (!hasUndefRegUpdate(MI.getOpcode()))
return 0;
// Set the OpNum parameter to the first source operand.
OpNum = 1;
- const MachineOperand &MO = MI->getOperand(OpNum);
+ const MachineOperand &MO = MI.getOperand(OpNum);
if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
- // Use the same magic number as getPartialRegUpdateClearance.
- return 16;
+ return UndefRegClearance;
}
return 0;
}
-void X86InstrInfo::
-breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
- unsigned Reg = MI->getOperand(OpNum).getReg();
+void X86InstrInfo::breakPartialRegDependency(
+ MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
+ unsigned Reg = MI.getOperand(OpNum).getReg();
// If MI kills this register, the false dependence is already broken.
- if (MI->killsRegister(Reg, TRI))
+ if (MI.killsRegister(Reg, TRI))
return;
if (X86::VR128RegClass.contains(Reg)) {
// These instructions are all floating point domain, so xorps is the best
// choice.
unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg)
- .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
- MI->addRegisterKilled(Reg, TRI, true);
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
+ .addReg(Reg, RegState::Undef)
+ .addReg(Reg, RegState::Undef);
+ MI.addRegisterKilled(Reg, TRI, true);
} else if (X86::VR256RegClass.contains(Reg)) {
// Use vxorps to clear the full ymm register.
// It wants to read and write the xmm sub-register.
unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(X86::VXORPSrr), XReg)
- .addReg(XReg, RegState::Undef).addReg(XReg, RegState::Undef)
- .addReg(Reg, RegState::ImplicitDefine);
- MI->addRegisterKilled(Reg, TRI, true);
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
+ .addReg(XReg, RegState::Undef)
+ .addReg(XReg, RegState::Undef)
+ .addReg(Reg, RegState::ImplicitDefine);
+ MI.addRegisterKilled(Reg, TRI, true);
}
}
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+MachineInstr *
+X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
+ ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
+ int FrameIndex, LiveIntervals *LIS) const {
// Check switch flag
if (NoFusing)
return nullptr;
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI->getOpcode()))
+ if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
const MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -5913,7 +6239,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
unsigned RCSize = 0;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
@@ -5925,8 +6251,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
if (Size < RCSize)
return nullptr;
// Change to CMPXXri r, 0 first.
- MI->setDesc(get(NewOpc));
- MI->getOperand(1).ChangeToImmediate(0);
+ MI.setDesc(get(NewOpc));
+ MI.getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
return nullptr;
@@ -5957,15 +6283,16 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
unsigned RegSize =
MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg())->getSize();
- if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm) && RegSize > 4) {
+ if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm) &&
+ RegSize > 4) {
// These instructions only load 32 bits; we can't fold them if the
// destination register is wider than 32 bits (4 bytes) and its user
// instruction isn't scalar (SS).
switch (UserOpc) {
- case X86::ADDSSrr_Int: case X86::VADDSSrr_Int:
- case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int:
- case X86::MULSSrr_Int: case X86::VMULSSrr_Int:
- case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int:
+ case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
+ case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
+ case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int:
+ case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int:
case X86::VFMADDSSr132r_Int: case X86::VFNMADDSSr132r_Int:
case X86::VFMADDSSr213r_Int: case X86::VFNMADDSSr213r_Int:
case X86::VFMADDSSr231r_Int: case X86::VFNMADDSSr231r_Int:
@@ -5978,15 +6305,16 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
}
}
- if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm) && RegSize > 8) {
+ if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm) &&
+ RegSize > 8) {
// These instructions only load 64 bits; we can't fold them if the
// destination register is wider than 64 bits (8 bytes) and its user
// instruction isn't scalar (SD).
switch (UserOpc) {
- case X86::ADDSDrr_Int: case X86::VADDSDrr_Int:
- case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int:
- case X86::MULSDrr_Int: case X86::VMULSDrr_Int:
- case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int:
+ case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
+ case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
+ case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int:
+ case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int:
case X86::VFMADDSDr132r_Int: case X86::VFNMADDSDr132r_Int:
case X86::VFMADDSDr213r_Int: case X86::VFNMADDSDr213r_Int:
case X86::VFMADDSDr231r_Int: case X86::VFNMADDSDr231r_Int:
@@ -6003,36 +6331,43 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
}
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+ MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+ LiveIntervals *LIS) const {
// If loading from a FrameIndex, fold directly from the FrameIndex.
- unsigned NumOps = LoadMI->getDesc().getNumOperands();
+ unsigned NumOps = LoadMI.getDesc().getNumOperands();
int FrameIndex;
if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
- if (isNonFoldablePartialRegisterLoad(*LoadMI, *MI, MF))
+ if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
return nullptr;
- return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex);
+ return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
}
// Check switch flag
if (NoFusing) return nullptr;
// Avoid partial register update stalls unless optimizing for size.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI->getOpcode()))
+ if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
// Determine the alignment of the load.
unsigned Alignment = 0;
- if (LoadMI->hasOneMemOperand())
- Alignment = (*LoadMI->memoperands_begin())->getAlignment();
+ if (LoadMI.hasOneMemOperand())
+ Alignment = (*LoadMI.memoperands_begin())->getAlignment();
else
- switch (LoadMI->getOpcode()) {
+ switch (LoadMI.getOpcode()) {
+ case X86::AVX512_512_SET0:
+ case X86::AVX512_512_SETALLONES:
+ Alignment = 64;
+ break;
case X86::AVX2_SETALLONES:
case X86::AVX_SET0:
+ case X86::AVX512_256_SET0:
Alignment = 32;
break;
case X86::V_SET0:
case X86::V_SETALLONES:
+ case X86::AVX512_128_SET0:
Alignment = 16;
break;
case X86::FsFLD0SD:
@@ -6046,7 +6381,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
}
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
@@ -6054,22 +6389,26 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
}
// Change to CMPXXri r, 0 first.
- MI->setDesc(get(NewOpc));
- MI->getOperand(1).ChangeToImmediate(0);
+ MI.setDesc(get(NewOpc));
+ MI.getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
return nullptr;
// Make sure the subregisters match.
// Otherwise we risk changing the size of the load.
- if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg())
+ if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
return nullptr;
SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
- switch (LoadMI->getOpcode()) {
+ switch (LoadMI.getOpcode()) {
case X86::V_SET0:
case X86::V_SETALLONES:
case X86::AVX2_SETALLONES:
case X86::AVX_SET0:
+ case X86::AVX512_128_SET0:
+ case X86::AVX512_256_SET0:
+ case X86::AVX512_512_SET0:
+ case X86::AVX512_512_SETALLONES:
case X86::FsFLD0SD:
case X86::FsFLD0SS: {
// Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
@@ -6082,7 +6421,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
- if (MF.getTarget().getRelocationModel() == Reloc::PIC_) {
+ if (MF.getTarget().isPositionIndependent()) {
if (Subtarget.is64Bit())
PICBase = X86::RIP;
else
@@ -6096,17 +6435,21 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Create a constant-pool entry.
MachineConstantPool &MCP = *MF.getConstantPool();
Type *Ty;
- unsigned Opc = LoadMI->getOpcode();
+ unsigned Opc = LoadMI.getOpcode();
if (Opc == X86::FsFLD0SS)
Ty = Type::getFloatTy(MF.getFunction()->getContext());
else if (Opc == X86::FsFLD0SD)
Ty = Type::getDoubleTy(MF.getFunction()->getContext());
- else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0)
+ else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()),16);
+ else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
+ Opc == X86::AVX512_256_SET0)
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
else
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
- bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES);
+ bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
+ Opc == X86::AVX512_512_SETALLONES);
const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
Constant::getNullValue(Ty);
unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
@@ -6120,12 +6463,12 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
break;
}
default: {
- if (isNonFoldablePartialRegisterLoad(*LoadMI, *MI, MF))
+ if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
return nullptr;
// Folding a normal load. Just copy the load's address operands.
- MOs.append(LoadMI->operands_begin() + NumOps - X86::AddrNumOperands,
- LoadMI->operands_begin() + NumOps);
+ MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
+ LoadMI.operands_begin() + NumOps);
break;
}
}
@@ -6133,11 +6476,10 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
/*Size=*/0, Alignment, /*AllowCommute=*/true);
}
-bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
- MemOp2RegOpTable.find(MI->getOpcode());
+bool X86InstrInfo::unfoldMemoryOperand(
+ MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
+ bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
+ auto I = MemOp2RegOpTable.find(MI.getOpcode());
if (I == MemOp2RegOpTable.end())
return false;
unsigned Opc = I->second.first;
@@ -6154,8 +6496,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
const MCInstrDesc &MCID = get(Opc);
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
// TODO: Check if 32-byte or greater accesses are slow too?
- if (!MI->hasOneMemOperand() &&
- RC == &X86::VR128RegClass &&
+ if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
Subtarget.isUnalignedMem16Slow())
// Without memoperands, loadRegFromAddr and storeRegToStackSlot will
// conservatively assume the address is unaligned. That's bad for
@@ -6165,8 +6506,8 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
SmallVector<MachineOperand,2> BeforeOps;
SmallVector<MachineOperand,2> AfterOps;
SmallVector<MachineOperand,4> ImpOps;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &Op = MI.getOperand(i);
if (i >= Index && i < Index + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (Op.isReg() && Op.isImplicit())
@@ -6179,10 +6520,8 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
// Emit the load instruction.
if (UnfoldLoad) {
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator> MMOs =
- MF.extractLoadMemRefs(MI->memoperands_begin(),
- MI->memoperands_end());
+ std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
+ MF.extractLoadMemRefs(MI.memoperands_begin(), MI.memoperands_end());
loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
@@ -6195,7 +6534,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
}
// Emit the data processing instruction.
- MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI->getDebugLoc(), true);
+ MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, DataMI);
if (FoldedStore)
@@ -6248,10 +6587,8 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
// Emit the store instruction.
if (UnfoldStore) {
const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator> MMOs =
- MF.extractStoreMemRefs(MI->memoperands_begin(),
- MI->memoperands_end());
+ std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
+ MF.extractStoreMemRefs(MI.memoperands_begin(), MI.memoperands_end());
storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs);
}
@@ -6264,8 +6601,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
if (!N->isMachineOpcode())
return false;
- DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
- MemOp2RegOpTable.find(N->getMachineOpcode());
+ auto I = MemOp2RegOpTable.find(N->getMachineOpcode());
if (I == MemOp2RegOpTable.end())
return false;
unsigned Opc = I->second.first;
@@ -6371,8 +6707,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
unsigned *LoadRegIndex) const {
- DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
- MemOp2RegOpTable.find(Opc);
+ auto I = MemOp2RegOpTable.find(Opc);
if (I == MemOp2RegOpTable.end())
return 0;
bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
@@ -6411,6 +6746,7 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
+ case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
// AVX load instructions
@@ -6421,13 +6757,52 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
+ case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
+ case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
+ // AVX512 load instructions
+ case X86::VMOVSSZrm:
+ case X86::VMOVSDZrm:
+ case X86::VMOVAPSZ128rm:
+ case X86::VMOVUPSZ128rm:
+ case X86::VMOVAPDZ128rm:
+ case X86::VMOVUPDZ128rm:
+ case X86::VMOVDQU8Z128rm:
+ case X86::VMOVDQU16Z128rm:
+ case X86::VMOVDQA32Z128rm:
+ case X86::VMOVDQU32Z128rm:
+ case X86::VMOVDQA64Z128rm:
+ case X86::VMOVDQU64Z128rm:
+ case X86::VMOVAPSZ256rm:
+ case X86::VMOVUPSZ256rm:
+ case X86::VMOVAPDZ256rm:
+ case X86::VMOVUPDZ256rm:
+ case X86::VMOVDQU8Z256rm:
+ case X86::VMOVDQU16Z256rm:
+ case X86::VMOVDQA32Z256rm:
+ case X86::VMOVDQU32Z256rm:
+ case X86::VMOVDQA64Z256rm:
+ case X86::VMOVDQU64Z256rm:
+ case X86::VMOVAPSZrm:
+ case X86::VMOVUPSZrm:
+ case X86::VMOVAPDZrm:
+ case X86::VMOVUPDZrm:
+ case X86::VMOVDQU8Zrm:
+ case X86::VMOVDQU16Zrm:
+ case X86::VMOVDQA32Zrm:
+ case X86::VMOVDQU32Zrm:
+ case X86::VMOVDQA64Zrm:
+ case X86::VMOVDQU64Zrm:
+ case X86::KMOVBkm:
+ case X86::KMOVWkm:
+ case X86::KMOVDkm:
+ case X86::KMOVQkm:
break;
}
switch (Opc2) {
@@ -6448,6 +6823,7 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
+ case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
// AVX load instructions
@@ -6458,13 +6834,52 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
+ case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
+ case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
+ // AVX512 load instructions
+ case X86::VMOVSSZrm:
+ case X86::VMOVSDZrm:
+ case X86::VMOVAPSZ128rm:
+ case X86::VMOVUPSZ128rm:
+ case X86::VMOVAPDZ128rm:
+ case X86::VMOVUPDZ128rm:
+ case X86::VMOVDQU8Z128rm:
+ case X86::VMOVDQU16Z128rm:
+ case X86::VMOVDQA32Z128rm:
+ case X86::VMOVDQU32Z128rm:
+ case X86::VMOVDQA64Z128rm:
+ case X86::VMOVDQU64Z128rm:
+ case X86::VMOVAPSZ256rm:
+ case X86::VMOVUPSZ256rm:
+ case X86::VMOVAPDZ256rm:
+ case X86::VMOVUPDZ256rm:
+ case X86::VMOVDQU8Z256rm:
+ case X86::VMOVDQU16Z256rm:
+ case X86::VMOVDQA32Z256rm:
+ case X86::VMOVDQU32Z256rm:
+ case X86::VMOVDQA64Z256rm:
+ case X86::VMOVDQU64Z256rm:
+ case X86::VMOVAPSZrm:
+ case X86::VMOVUPSZrm:
+ case X86::VMOVAPDZrm:
+ case X86::VMOVUPDZrm:
+ case X86::VMOVDQU8Zrm:
+ case X86::VMOVDQU16Zrm:
+ case X86::VMOVDQA32Zrm:
+ case X86::VMOVDQU32Zrm:
+ case X86::VMOVDQA64Zrm:
+ case X86::VMOVDQU64Zrm:
+ case X86::KMOVBkm:
+ case X86::KMOVWkm:
+ case X86::KMOVDkm:
+ case X86::KMOVQkm:
break;
}
@@ -6540,8 +6955,8 @@ bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
return true;
}
-bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
- MachineInstr *Second) const {
+bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr &First,
+ MachineInstr &Second) const {
// Check if this processor supports macro-fusion. Since this is a minor
// heuristic, we haven't specifically reserved a feature. hasAVX is a decent
// proxy for SandyBridge+.
@@ -6554,7 +6969,7 @@ bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
FuseInc
} FuseKind;
- switch(Second->getOpcode()) {
+ switch (Second.getOpcode()) {
default:
return false;
case X86::JE_1:
@@ -6580,7 +6995,7 @@ bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
FuseKind = FuseTest;
break;
}
- switch (First->getOpcode()) {
+ switch (First.getOpcode()) {
default:
return false;
case X86::TEST8rr:
@@ -6703,8 +7118,6 @@ bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 1 && "Invalid X86 branch condition!");
X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
- if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
- return true;
Cond[0].setImm(GetOppositeBranchCondition(CC));
return false;
}
@@ -6827,29 +7240,29 @@ static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) {
}
std::pair<uint16_t, uint16_t>
-X86InstrInfo::getExecutionDomain(const MachineInstr *MI) const {
- uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
+X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
+ uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
bool hasAVX2 = Subtarget.hasAVX2();
uint16_t validDomains = 0;
- if (domain && lookup(MI->getOpcode(), domain))
+ if (domain && lookup(MI.getOpcode(), domain))
validDomains = 0xe;
- else if (domain && lookupAVX2(MI->getOpcode(), domain))
+ else if (domain && lookupAVX2(MI.getOpcode(), domain))
validDomains = hasAVX2 ? 0xe : 0x6;
return std::make_pair(domain, validDomains);
}
-void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
+void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
assert(Domain>0 && Domain<4 && "Invalid execution domain");
- uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
+ uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
assert(dom && "Not an SSE instruction");
- const uint16_t *table = lookup(MI->getOpcode(), dom);
+ const uint16_t *table = lookup(MI.getOpcode(), dom);
if (!table) { // try the other table
assert((Subtarget.hasAVX2() || Domain < 3) &&
"256-bit vector operations only available in AVX2");
- table = lookupAVX2(MI->getOpcode(), dom);
+ table = lookupAVX2(MI.getOpcode(), dom);
}
assert(table && "Cannot change domain");
- MI->setDesc(get(table[Domain-1]));
+ MI.setDesc(get(table[Domain - 1]));
}
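The returned pair is (current domain, bitmask of domains this opcode can be rewritten into): 0xe = 0b1110 marks domains 1-3 reachable, while 0x6 = 0b0110 drops domain 3 when AVX2 is unavailable. A rough sketch of how the pair decodes, with an invented SSEDomainShift value rather than X86II's:

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned SSEDomainShift = 22;               // illustrative bit position
  uint64_t TSFlags = uint64_t(2) << SSEDomainShift; // an instruction in domain 2
  uint16_t domain = (TSFlags >> SSEDomainShift) & 3;
  uint16_t validDomains = 0xe;                      // 0b1110: domains 1, 2 and 3
  for (unsigned D = 1; D <= 3; ++D)
    if (validDomains & (1u << D))
      printf("domain %u reachable%s\n", D, D == domain ? " (current)" : "");
  return 0;
}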
/// Return the noop instruction to use for a noop.
@@ -6886,6 +7299,10 @@ unsigned X86InstrInfo::getJumpInstrTableEntryBound() const {
bool X86InstrInfo::isHighLatencyDef(int opc) const {
switch (opc) {
default: return false;
+ case X86::DIVPDrm:
+ case X86::DIVPDrr:
+ case X86::DIVPSrm:
+ case X86::DIVPSrr:
case X86::DIVSDrm:
case X86::DIVSDrm_Int:
case X86::DIVSDrr:
@@ -6907,6 +7324,14 @@ bool X86InstrInfo::isHighLatencyDef(int opc) const {
case X86::SQRTSSr:
case X86::SQRTSSr_Int:
// AVX instructions with high latency
+ case X86::VDIVPDrm:
+ case X86::VDIVPDrr:
+ case X86::VDIVPDYrm:
+ case X86::VDIVPDYrr:
+ case X86::VDIVPSrm:
+ case X86::VDIVPSrr:
+ case X86::VDIVPSYrm:
+ case X86::VDIVPSYrr:
case X86::VDIVSDrm:
case X86::VDIVSDrm_Int:
case X86::VDIVSDrr:
@@ -6917,55 +7342,277 @@ bool X86InstrInfo::isHighLatencyDef(int opc) const {
case X86::VDIVSSrr_Int:
case X86::VSQRTPDm:
case X86::VSQRTPDr:
+ case X86::VSQRTPDYm:
+ case X86::VSQRTPDYr:
case X86::VSQRTPSm:
case X86::VSQRTPSr:
+ case X86::VSQRTPSYm:
+ case X86::VSQRTPSYr:
case X86::VSQRTSDm:
case X86::VSQRTSDm_Int:
case X86::VSQRTSDr:
+ case X86::VSQRTSDr_Int:
case X86::VSQRTSSm:
case X86::VSQRTSSm_Int:
case X86::VSQRTSSr:
+ case X86::VSQRTSSr_Int:
+ // AVX512 instructions with high latency
+ case X86::VDIVPDZ128rm:
+ case X86::VDIVPDZ128rmb:
+ case X86::VDIVPDZ128rmbk:
+ case X86::VDIVPDZ128rmbkz:
+ case X86::VDIVPDZ128rmk:
+ case X86::VDIVPDZ128rmkz:
+ case X86::VDIVPDZ128rr:
+ case X86::VDIVPDZ128rrk:
+ case X86::VDIVPDZ128rrkz:
+ case X86::VDIVPDZ256rm:
+ case X86::VDIVPDZ256rmb:
+ case X86::VDIVPDZ256rmbk:
+ case X86::VDIVPDZ256rmbkz:
+ case X86::VDIVPDZ256rmk:
+ case X86::VDIVPDZ256rmkz:
+ case X86::VDIVPDZ256rr:
+ case X86::VDIVPDZ256rrk:
+ case X86::VDIVPDZ256rrkz:
+ case X86::VDIVPDZrb:
+ case X86::VDIVPDZrbk:
+ case X86::VDIVPDZrbkz:
+ case X86::VDIVPDZrm:
+ case X86::VDIVPDZrmb:
+ case X86::VDIVPDZrmbk:
+ case X86::VDIVPDZrmbkz:
+ case X86::VDIVPDZrmk:
+ case X86::VDIVPDZrmkz:
+ case X86::VDIVPDZrr:
+ case X86::VDIVPDZrrk:
+ case X86::VDIVPDZrrkz:
+ case X86::VDIVPSZ128rm:
+ case X86::VDIVPSZ128rmb:
+ case X86::VDIVPSZ128rmbk:
+ case X86::VDIVPSZ128rmbkz:
+ case X86::VDIVPSZ128rmk:
+ case X86::VDIVPSZ128rmkz:
+ case X86::VDIVPSZ128rr:
+ case X86::VDIVPSZ128rrk:
+ case X86::VDIVPSZ128rrkz:
+ case X86::VDIVPSZ256rm:
+ case X86::VDIVPSZ256rmb:
+ case X86::VDIVPSZ256rmbk:
+ case X86::VDIVPSZ256rmbkz:
+ case X86::VDIVPSZ256rmk:
+ case X86::VDIVPSZ256rmkz:
+ case X86::VDIVPSZ256rr:
+ case X86::VDIVPSZ256rrk:
+ case X86::VDIVPSZ256rrkz:
+ case X86::VDIVPSZrb:
+ case X86::VDIVPSZrbk:
+ case X86::VDIVPSZrbkz:
+ case X86::VDIVPSZrm:
+ case X86::VDIVPSZrmb:
+ case X86::VDIVPSZrmbk:
+ case X86::VDIVPSZrmbkz:
+ case X86::VDIVPSZrmk:
+ case X86::VDIVPSZrmkz:
+ case X86::VDIVPSZrr:
+ case X86::VDIVPSZrrk:
+ case X86::VDIVPSZrrkz:
+ case X86::VDIVSDZrm:
+ case X86::VDIVSDZrr:
+ case X86::VDIVSDZrm_Int:
+ case X86::VDIVSDZrm_Intk:
+ case X86::VDIVSDZrm_Intkz:
+ case X86::VDIVSDZrr_Int:
+ case X86::VDIVSDZrr_Intk:
+ case X86::VDIVSDZrr_Intkz:
+ case X86::VDIVSDZrrb:
+ case X86::VDIVSDZrrbk:
+ case X86::VDIVSDZrrbkz:
+ case X86::VDIVSSZrm:
+ case X86::VDIVSSZrr:
+ case X86::VDIVSSZrm_Int:
+ case X86::VDIVSSZrm_Intk:
+ case X86::VDIVSSZrm_Intkz:
+ case X86::VDIVSSZrr_Int:
+ case X86::VDIVSSZrr_Intk:
+ case X86::VDIVSSZrr_Intkz:
+ case X86::VDIVSSZrrb:
+ case X86::VDIVSSZrrbk:
+ case X86::VDIVSSZrrbkz:
+ case X86::VSQRTPDZ128m:
+ case X86::VSQRTPDZ128mb:
+ case X86::VSQRTPDZ128mbk:
+ case X86::VSQRTPDZ128mbkz:
+ case X86::VSQRTPDZ128mk:
+ case X86::VSQRTPDZ128mkz:
+ case X86::VSQRTPDZ128r:
+ case X86::VSQRTPDZ128rk:
+ case X86::VSQRTPDZ128rkz:
+ case X86::VSQRTPDZ256m:
+ case X86::VSQRTPDZ256mb:
+ case X86::VSQRTPDZ256mbk:
+ case X86::VSQRTPDZ256mbkz:
+ case X86::VSQRTPDZ256mk:
+ case X86::VSQRTPDZ256mkz:
+ case X86::VSQRTPDZ256r:
+ case X86::VSQRTPDZ256rk:
+ case X86::VSQRTPDZ256rkz:
case X86::VSQRTPDZm:
+ case X86::VSQRTPDZmb:
+ case X86::VSQRTPDZmbk:
+ case X86::VSQRTPDZmbkz:
+ case X86::VSQRTPDZmk:
+ case X86::VSQRTPDZmkz:
case X86::VSQRTPDZr:
+ case X86::VSQRTPDZrb:
+ case X86::VSQRTPDZrbk:
+ case X86::VSQRTPDZrbkz:
+ case X86::VSQRTPDZrk:
+ case X86::VSQRTPDZrkz:
+ case X86::VSQRTPSZ128m:
+ case X86::VSQRTPSZ128mb:
+ case X86::VSQRTPSZ128mbk:
+ case X86::VSQRTPSZ128mbkz:
+ case X86::VSQRTPSZ128mk:
+ case X86::VSQRTPSZ128mkz:
+ case X86::VSQRTPSZ128r:
+ case X86::VSQRTPSZ128rk:
+ case X86::VSQRTPSZ128rkz:
+ case X86::VSQRTPSZ256m:
+ case X86::VSQRTPSZ256mb:
+ case X86::VSQRTPSZ256mbk:
+ case X86::VSQRTPSZ256mbkz:
+ case X86::VSQRTPSZ256mk:
+ case X86::VSQRTPSZ256mkz:
+ case X86::VSQRTPSZ256r:
+ case X86::VSQRTPSZ256rk:
+ case X86::VSQRTPSZ256rkz:
case X86::VSQRTPSZm:
+ case X86::VSQRTPSZmb:
+ case X86::VSQRTPSZmbk:
+ case X86::VSQRTPSZmbkz:
+ case X86::VSQRTPSZmk:
+ case X86::VSQRTPSZmkz:
case X86::VSQRTPSZr:
+ case X86::VSQRTPSZrb:
+ case X86::VSQRTPSZrbk:
+ case X86::VSQRTPSZrbkz:
+ case X86::VSQRTPSZrk:
+ case X86::VSQRTPSZrkz:
case X86::VSQRTSDZm:
case X86::VSQRTSDZm_Int:
+ case X86::VSQRTSDZm_Intk:
+ case X86::VSQRTSDZm_Intkz:
case X86::VSQRTSDZr:
+ case X86::VSQRTSDZr_Int:
+ case X86::VSQRTSDZr_Intk:
+ case X86::VSQRTSDZr_Intkz:
+ case X86::VSQRTSDZrb_Int:
+ case X86::VSQRTSDZrb_Intk:
+ case X86::VSQRTSDZrb_Intkz:
+ case X86::VSQRTSSZm:
case X86::VSQRTSSZm_Int:
+ case X86::VSQRTSSZm_Intk:
+ case X86::VSQRTSSZm_Intkz:
case X86::VSQRTSSZr:
- case X86::VSQRTSSZm:
- case X86::VDIVSDZrm:
- case X86::VDIVSDZrr:
- case X86::VDIVSSZrm:
- case X86::VDIVSSZrr:
-
- case X86::VGATHERQPSZrm:
- case X86::VGATHERQPDZrm:
+ case X86::VSQRTSSZr_Int:
+ case X86::VSQRTSSZr_Intk:
+ case X86::VSQRTSSZr_Intkz:
+ case X86::VSQRTSSZrb_Int:
+ case X86::VSQRTSSZrb_Intk:
+ case X86::VSQRTSSZrb_Intkz:
+
+ case X86::VGATHERDPDYrm:
+ case X86::VGATHERDPDZ128rm:
+ case X86::VGATHERDPDZ256rm:
case X86::VGATHERDPDZrm:
+ case X86::VGATHERDPDrm:
+ case X86::VGATHERDPSYrm:
+ case X86::VGATHERDPSZ128rm:
+ case X86::VGATHERDPSZ256rm:
case X86::VGATHERDPSZrm:
- case X86::VPGATHERQDZrm:
- case X86::VPGATHERQQZrm:
+ case X86::VGATHERDPSrm:
+ case X86::VGATHERPF0DPDm:
+ case X86::VGATHERPF0DPSm:
+ case X86::VGATHERPF0QPDm:
+ case X86::VGATHERPF0QPSm:
+ case X86::VGATHERPF1DPDm:
+ case X86::VGATHERPF1DPSm:
+ case X86::VGATHERPF1QPDm:
+ case X86::VGATHERPF1QPSm:
+ case X86::VGATHERQPDYrm:
+ case X86::VGATHERQPDZ128rm:
+ case X86::VGATHERQPDZ256rm:
+ case X86::VGATHERQPDZrm:
+ case X86::VGATHERQPDrm:
+ case X86::VGATHERQPSYrm:
+ case X86::VGATHERQPSZ128rm:
+ case X86::VGATHERQPSZ256rm:
+ case X86::VGATHERQPSZrm:
+ case X86::VGATHERQPSrm:
+ case X86::VPGATHERDDYrm:
+ case X86::VPGATHERDDZ128rm:
+ case X86::VPGATHERDDZ256rm:
case X86::VPGATHERDDZrm:
+ case X86::VPGATHERDDrm:
+ case X86::VPGATHERDQYrm:
+ case X86::VPGATHERDQZ128rm:
+ case X86::VPGATHERDQZ256rm:
case X86::VPGATHERDQZrm:
- case X86::VSCATTERQPDZmr:
- case X86::VSCATTERQPSZmr:
+ case X86::VPGATHERDQrm:
+ case X86::VPGATHERQDYrm:
+ case X86::VPGATHERQDZ128rm:
+ case X86::VPGATHERQDZ256rm:
+ case X86::VPGATHERQDZrm:
+ case X86::VPGATHERQDrm:
+ case X86::VPGATHERQQYrm:
+ case X86::VPGATHERQQZ128rm:
+ case X86::VPGATHERQQZ256rm:
+ case X86::VPGATHERQQZrm:
+ case X86::VPGATHERQQrm:
+ case X86::VSCATTERDPDZ128mr:
+ case X86::VSCATTERDPDZ256mr:
case X86::VSCATTERDPDZmr:
+ case X86::VSCATTERDPSZ128mr:
+ case X86::VSCATTERDPSZ256mr:
case X86::VSCATTERDPSZmr:
- case X86::VPSCATTERQDZmr:
- case X86::VPSCATTERQQZmr:
+ case X86::VSCATTERPF0DPDm:
+ case X86::VSCATTERPF0DPSm:
+ case X86::VSCATTERPF0QPDm:
+ case X86::VSCATTERPF0QPSm:
+ case X86::VSCATTERPF1DPDm:
+ case X86::VSCATTERPF1DPSm:
+ case X86::VSCATTERPF1QPDm:
+ case X86::VSCATTERPF1QPSm:
+ case X86::VSCATTERQPDZ128mr:
+ case X86::VSCATTERQPDZ256mr:
+ case X86::VSCATTERQPDZmr:
+ case X86::VSCATTERQPSZ128mr:
+ case X86::VSCATTERQPSZ256mr:
+ case X86::VSCATTERQPSZmr:
+ case X86::VPSCATTERDDZ128mr:
+ case X86::VPSCATTERDDZ256mr:
case X86::VPSCATTERDDZmr:
+ case X86::VPSCATTERDQZ128mr:
+ case X86::VPSCATTERDQZ256mr:
case X86::VPSCATTERDQZmr:
+ case X86::VPSCATTERQDZ128mr:
+ case X86::VPSCATTERQDZ256mr:
+ case X86::VPSCATTERQDZmr:
+ case X86::VPSCATTERQQZ128mr:
+ case X86::VPSCATTERQQZ256mr:
+ case X86::VPSCATTERQQZmr:
return true;
}
}
-bool X86InstrInfo::
-hasHighOperandLatency(const TargetSchedModel &SchedModel,
- const MachineRegisterInfo *MRI,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const {
- return isHighLatencyDef(DefMI->getOpcode());
+bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr &DefMI,
+ unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const {
+ return isHighLatencyDef(DefMI.getOpcode());
}
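hasHighOperandLatency deliberately ignores the operand indices: the answer depends only on whether the defining opcode is on the list above. A sketch of a consumer that budgets extra scheduling distance after such defs, with isHighLatencyDef as a stand-in for the real opcode whitelist:

#include <cstdio>
#include <vector>

struct Instr { int Opcode; };

static bool isHighLatencyDef(int Opc) { return Opc == 42; /* e.g. a divide */ }

int main() {
  std::vector<Instr> Block = {{7}, {42}, {13}, {42}};
  int HighLatencyDefs = 0;
  for (const Instr &I : Block)
    if (isHighLatencyDef(I.Opcode))
      ++HighLatencyDefs; // schedule dependent uses further away from these
  printf("high-latency defs: %d\n", HighLatencyDefs);
  return 0;
}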
bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
@@ -7014,12 +7661,119 @@ bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
case X86::PANDrr:
case X86::PORrr:
case X86::PXORrr:
+ case X86::ANDPDrr:
+ case X86::ANDPSrr:
+ case X86::ORPDrr:
+ case X86::ORPSrr:
+ case X86::XORPDrr:
+ case X86::XORPSrr:
+ case X86::PADDBrr:
+ case X86::PADDWrr:
+ case X86::PADDDrr:
+ case X86::PADDQrr:
case X86::VPANDrr:
case X86::VPANDYrr:
+ case X86::VPANDDZ128rr:
+ case X86::VPANDDZ256rr:
+ case X86::VPANDDZrr:
+ case X86::VPANDQZ128rr:
+ case X86::VPANDQZ256rr:
+ case X86::VPANDQZrr:
case X86::VPORrr:
case X86::VPORYrr:
+ case X86::VPORDZ128rr:
+ case X86::VPORDZ256rr:
+ case X86::VPORDZrr:
+ case X86::VPORQZ128rr:
+ case X86::VPORQZ256rr:
+ case X86::VPORQZrr:
case X86::VPXORrr:
case X86::VPXORYrr:
+ case X86::VPXORDZ128rr:
+ case X86::VPXORDZ256rr:
+ case X86::VPXORDZrr:
+ case X86::VPXORQZ128rr:
+ case X86::VPXORQZ256rr:
+ case X86::VPXORQZrr:
+ case X86::VANDPDrr:
+ case X86::VANDPSrr:
+ case X86::VANDPDYrr:
+ case X86::VANDPSYrr:
+ case X86::VANDPDZ128rr:
+ case X86::VANDPSZ128rr:
+ case X86::VANDPDZ256rr:
+ case X86::VANDPSZ256rr:
+ case X86::VANDPDZrr:
+ case X86::VANDPSZrr:
+ case X86::VORPDrr:
+ case X86::VORPSrr:
+ case X86::VORPDYrr:
+ case X86::VORPSYrr:
+ case X86::VORPDZ128rr:
+ case X86::VORPSZ128rr:
+ case X86::VORPDZ256rr:
+ case X86::VORPSZ256rr:
+ case X86::VORPDZrr:
+ case X86::VORPSZrr:
+ case X86::VXORPDrr:
+ case X86::VXORPSrr:
+ case X86::VXORPDYrr:
+ case X86::VXORPSYrr:
+ case X86::VXORPDZ128rr:
+ case X86::VXORPSZ128rr:
+ case X86::VXORPDZ256rr:
+ case X86::VXORPSZ256rr:
+ case X86::VXORPDZrr:
+ case X86::VXORPSZrr:
+ case X86::KADDBrr:
+ case X86::KADDWrr:
+ case X86::KADDDrr:
+ case X86::KADDQrr:
+ case X86::KANDBrr:
+ case X86::KANDWrr:
+ case X86::KANDDrr:
+ case X86::KANDQrr:
+ case X86::KORBrr:
+ case X86::KORWrr:
+ case X86::KORDrr:
+ case X86::KORQrr:
+ case X86::KXORBrr:
+ case X86::KXORWrr:
+ case X86::KXORDrr:
+ case X86::KXORQrr:
+ case X86::VPADDBrr:
+ case X86::VPADDWrr:
+ case X86::VPADDDrr:
+ case X86::VPADDQrr:
+ case X86::VPADDBYrr:
+ case X86::VPADDWYrr:
+ case X86::VPADDDYrr:
+ case X86::VPADDQYrr:
+ case X86::VPADDBZ128rr:
+ case X86::VPADDWZ128rr:
+ case X86::VPADDDZ128rr:
+ case X86::VPADDQZ128rr:
+ case X86::VPADDBZ256rr:
+ case X86::VPADDWZ256rr:
+ case X86::VPADDDZ256rr:
+ case X86::VPADDQZ256rr:
+ case X86::VPADDBZrr:
+ case X86::VPADDWZrr:
+ case X86::VPADDDZrr:
+ case X86::VPADDQZrr:
+ case X86::VPMULLWrr:
+ case X86::VPMULLWYrr:
+ case X86::VPMULLWZ128rr:
+ case X86::VPMULLWZ256rr:
+ case X86::VPMULLWZrr:
+ case X86::VPMULLDrr:
+ case X86::VPMULLDYrr:
+ case X86::VPMULLDZ128rr:
+ case X86::VPMULLDZ256rr:
+ case X86::VPMULLDZrr:
+ case X86::VPMULLQZ128rr:
+ case X86::VPMULLQZ256rr:
+ case X86::VPMULLQZrr:
// Normal min/max instructions are not commutative because of NaN and signed
// zero semantics, but these are. Thus, there's no need to check for global
// relaxed math; the instructions themselves have the properties we need.
@@ -7035,14 +7789,30 @@ bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
case X86::VMAXCPSrr:
case X86::VMAXCPDYrr:
case X86::VMAXCPSYrr:
+ case X86::VMAXCPDZ128rr:
+ case X86::VMAXCPSZ128rr:
+ case X86::VMAXCPDZ256rr:
+ case X86::VMAXCPSZ256rr:
+ case X86::VMAXCPDZrr:
+ case X86::VMAXCPSZrr:
case X86::VMAXCSDrr:
case X86::VMAXCSSrr:
+ case X86::VMAXCSDZrr:
+ case X86::VMAXCSSZrr:
case X86::VMINCPDrr:
case X86::VMINCPSrr:
case X86::VMINCPDYrr:
case X86::VMINCPSYrr:
+ case X86::VMINCPDZ128rr:
+ case X86::VMINCPSZ128rr:
+ case X86::VMINCPDZ256rr:
+ case X86::VMINCPSZ256rr:
+ case X86::VMINCPDZrr:
+ case X86::VMINCPSZrr:
case X86::VMINCSDrr:
case X86::VMINCSSrr:
+ case X86::VMINCSDZrr:
+ case X86::VMINCSSZrr:
return true;
case X86::ADDPDrr:
case X86::ADDPSrr:
@@ -7056,14 +7826,30 @@ bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
case X86::VADDPSrr:
case X86::VADDPDYrr:
case X86::VADDPSYrr:
+ case X86::VADDPDZ128rr:
+ case X86::VADDPSZ128rr:
+ case X86::VADDPDZ256rr:
+ case X86::VADDPSZ256rr:
+ case X86::VADDPDZrr:
+ case X86::VADDPSZrr:
case X86::VADDSDrr:
case X86::VADDSSrr:
+ case X86::VADDSDZrr:
+ case X86::VADDSSZrr:
case X86::VMULPDrr:
case X86::VMULPSrr:
case X86::VMULPDYrr:
case X86::VMULPSYrr:
+ case X86::VMULPDZ128rr:
+ case X86::VMULPSZ128rr:
+ case X86::VMULPDZ256rr:
+ case X86::VMULPSZ256rr:
+ case X86::VMULPDZrr:
+ case X86::VMULPSZrr:
case X86::VMULSDrr:
case X86::VMULSSrr:
+ case X86::VMULSDZrr:
+ case X86::VMULSSZrr:
return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
default:
return false;
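Two numeric facts motivate the UnsafeFPMath gate on the FP add/mul cases and the separate "C" (commutative) min/max variants; both can be checked outside LLVM:

#include <cmath>
#include <cstdio>

// x86 MINSS-style select: every comparison with NaN is false, so the
// second source wins, which is why plain min/max is not commutative.
static double x86min(double Src1, double Src2) {
  return Src1 < Src2 ? Src1 : Src2;
}

int main() {
  double a = 1e20, b = -1e20, c = 1.0;
  // FP addition is not associative, hence reassociation needs unsafe-fp-math.
  printf("(a+b)+c = %g, a+(b+c) = %g\n", (a + b) + c, a + (b + c)); // 1 vs 0
  double n = std::nan("");
  printf("x86min(NaN,2) = %g, x86min(2,NaN) = %g\n",
         x86min(n, 2.0), x86min(2.0, n)); // 2 vs nan: order matters
  return 0;
}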
@@ -7135,10 +7921,8 @@ X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
{MO_NTPOFF, "x86-ntpoff"},
{MO_GOTNTPOFF, "x86-gotntpoff"},
{MO_DLLIMPORT, "x86-dllimport"},
- {MO_DARWIN_STUB, "x86-darwin-stub"},
{MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
{MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
- {MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE, "x86-darwin-hidden-nonlazy-pic-base"},
{MO_TLVP, "x86-tlvp"},
{MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
{MO_SECREL, "x86-secrel"}};
@@ -7163,7 +7947,7 @@ namespace {
return false;
// Only emit a global base reg in PIC mode.
- if (TM->getRelocationModel() != Reloc::PIC_)
+ if (!TM->isPositionIndependent())
return false;
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -7223,7 +8007,10 @@ namespace {
LDTLSCleanup() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
- X86MachineFunctionInfo* MFI = MF.getInfo<X86MachineFunctionInfo>();
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
+ X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
// No point folding accesses if there aren't at least two.
return false;
@@ -7249,9 +8036,9 @@ namespace {
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
if (TLSBaseAddrReg)
- I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
+ I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
else
- I = SetRegister(I, &TLSBaseAddrReg);
+ I = SetRegister(*I, &TLSBaseAddrReg);
Changed = true;
break;
default:
@@ -7270,29 +8057,29 @@ namespace {
// Replace the TLS_base_addr instruction I with a copy from
// TLSBaseAddrReg, returning the new instruction.
- MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
+ MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
unsigned TLSBaseAddrReg) {
- MachineFunction *MF = I->getParent()->getParent();
+ MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
// Insert a Copy from TLSBaseAddrReg to RAX/EAX.
- MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
- TII->get(TargetOpcode::COPY),
- is64Bit ? X86::RAX : X86::EAX)
- .addReg(TLSBaseAddrReg);
+ MachineInstr *Copy =
+ BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
+ .addReg(TLSBaseAddrReg);
// Erase the TLS_base_addr instruction.
- I->eraseFromParent();
+ I.eraseFromParent();
return Copy;
}
// Create a virtual register in *TLSBaseAddrReg, and populate it by
// inserting a copy instruction after I. Returns the new instruction.
- MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
- MachineFunction *MF = I->getParent()->getParent();
+ MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
+ MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
@@ -7304,11 +8091,11 @@ namespace {
: &X86::GR32RegClass);
// Insert a copy from RAX/EAX to TLSBaseAddrReg.
- MachineInstr *Next = I->getNextNode();
- MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
- TII->get(TargetOpcode::COPY),
- *TLSBaseAddrReg)
- .addReg(is64Bit ? X86::RAX : X86::EAX);
+ MachineInstr *Next = I.getNextNode();
+ MachineInstr *Copy =
+ BuildMI(*I.getParent(), Next, I.getDebugLoc(),
+ TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
+ .addReg(is64Bit ? X86::RAX : X86::EAX);
return Copy;
}
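The pass's effect is easiest to see on a toy block: the first base-address computation is kept and its result cached in a fresh virtual register (SetRegister), and every later one in the block collapses into a copy from that register (ReplaceTLSBaseAddrCall). A model of that rewrite over strings, not MachineIR:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Block = {"tls_base_addr", "use",
                                    "tls_base_addr", "use"};
  bool HaveBase = false;
  for (std::string &I : Block) {
    if (I != "tls_base_addr")
      continue;
    if (!HaveBase) {
      HaveBase = true;           // SetRegister: cache the result in a new vreg
      I = "tls_base_addr+copy";  // insert COPY RAX/EAX -> %vreg after the call
    } else {
      I = "copy";                // ReplaceTLSBaseAddrCall: reuse the cached vreg
    }
  }
  for (const std::string &I : Block)
    printf("%s\n", I);
  return 0;
}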
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index edd09d6175952..858f35d1cbf0d 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -23,60 +23,61 @@
#include "X86GenInstrInfo.inc"
namespace llvm {
+ class MachineInstrBuilder;
class X86RegisterInfo;
class X86Subtarget;
namespace X86 {
// X86 specific condition code. These correspond to X86_*_COND in
// X86InstrInfo.td. They must be kept in sync.
- enum CondCode {
- COND_A = 0,
- COND_AE = 1,
- COND_B = 2,
- COND_BE = 3,
- COND_E = 4,
- COND_G = 5,
- COND_GE = 6,
- COND_L = 7,
- COND_LE = 8,
- COND_NE = 9,
- COND_NO = 10,
- COND_NP = 11,
- COND_NS = 12,
- COND_O = 13,
- COND_P = 14,
- COND_S = 15,
- LAST_VALID_COND = COND_S,
-
- // Artificial condition codes. These are used by AnalyzeBranch
- // to indicate a block terminated with two conditional branches to
- // the same location. This occurs in code using FCMP_OEQ or FCMP_UNE,
- // which can't be represented on x86 with a single condition. These
- // are never used in MachineInstrs.
- COND_NE_OR_P,
- COND_NP_OR_E,
-
- COND_INVALID
- };
-
- // Turn condition code into conditional branch opcode.
- unsigned GetCondBranchFromCond(CondCode CC);
-
- /// \brief Return a set opcode for the given condition and whether it has
- /// a memory operand.
- unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);
-
- /// \brief Return a cmov opcode for the given condition, register size in
- /// bytes, and operand type.
- unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
- bool HasMemoryOperand = false);
-
- // Turn CMov opcode into condition code.
- CondCode getCondFromCMovOpc(unsigned Opc);
-
- /// GetOppositeBranchCondition - Return the inverse of the specified cond,
- /// e.g. turning COND_E to COND_NE.
- CondCode GetOppositeBranchCondition(CondCode CC);
+enum CondCode {
+ COND_A = 0,
+ COND_AE = 1,
+ COND_B = 2,
+ COND_BE = 3,
+ COND_E = 4,
+ COND_G = 5,
+ COND_GE = 6,
+ COND_L = 7,
+ COND_LE = 8,
+ COND_NE = 9,
+ COND_NO = 10,
+ COND_NP = 11,
+ COND_NS = 12,
+ COND_O = 13,
+ COND_P = 14,
+ COND_S = 15,
+ LAST_VALID_COND = COND_S,
+
+ // Artificial condition codes. These are used by AnalyzeBranch
+ // to indicate a block terminated with two conditional branches that together
+ // form a compound condition. They occur in code using FCMP_OEQ or FCMP_UNE,
+ // which can't be represented on x86 with a single condition. These
+ // are never used in MachineInstrs and are inverses of one another.
+ COND_NE_OR_P,
+ COND_E_AND_NP,
+
+ COND_INVALID
+};
+
+// Turn condition code into conditional branch opcode.
+unsigned GetCondBranchFromCond(CondCode CC);
+
+/// \brief Return a set opcode for the given condition and whether it has
+/// a memory operand.
+unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);
+
+/// \brief Return a cmov opcode for the given condition, register size in
+/// bytes, and operand type.
+unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
+ bool HasMemoryOperand = false);
+
+// Turn CMov opcode into condition code.
+CondCode getCondFromCMovOpc(unsigned Opc);
+
+/// GetOppositeBranchCondition - Return the inverse of the specified cond,
+/// e.g. turning COND_E to COND_NE.
+CondCode GetOppositeBranchCondition(CondCode CC);
} // end namespace X86
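The inverse relationship claimed for the two artificial codes is De Morgan over the two EFLAGS bits involved; a quick exhaustive check, independent of LLVM:

#include <cassert>

int main() {
  for (int ZF = 0; ZF <= 1; ++ZF)
    for (int PF = 0; PF <= 1; ++PF) {
      bool NE_OR_P = (ZF == 0) || (PF == 1);  // COND_NE_OR_P taken?
      bool E_AND_NP = (ZF == 1) && (PF == 0); // COND_E_AND_NP taken?
      assert(NE_OR_P == !E_AND_NP);           // exact inverses
    }
  return 0;
}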
@@ -89,7 +90,6 @@ inline static bool isGlobalStubReference(unsigned char TargetFlag) {
case X86II::MO_GOT: // normal GOT reference.
case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Normal $non_lazy_ptr ref.
case X86II::MO_DARWIN_NONLAZY: // Normal $non_lazy_ptr ref.
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Hidden $non_lazy_ptr ref.
return true;
default:
return false;
@@ -105,7 +105,6 @@ inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
case X86II::MO_GOT: // isPICStyleGOT: other global.
case X86II::MO_PIC_BASE_OFFSET: // Darwin local global.
case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Darwin/32 external global.
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Darwin/32 hidden global.
case X86II::MO_TLVP: // ??? Pretty sure..
return true;
default:
@@ -119,23 +118,24 @@ inline static bool isScale(const MachineOperand &MO) {
MO.getImm() == 4 || MO.getImm() == 8);
}
-inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
- if (MI->getOperand(Op).isFI()) return true;
- return Op+X86::AddrSegmentReg <= MI->getNumOperands() &&
- MI->getOperand(Op+X86::AddrBaseReg).isReg() &&
- isScale(MI->getOperand(Op+X86::AddrScaleAmt)) &&
- MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
- (MI->getOperand(Op+X86::AddrDisp).isImm() ||
- MI->getOperand(Op+X86::AddrDisp).isGlobal() ||
- MI->getOperand(Op+X86::AddrDisp).isCPI() ||
- MI->getOperand(Op+X86::AddrDisp).isJTI());
+inline static bool isLeaMem(const MachineInstr &MI, unsigned Op) {
+ if (MI.getOperand(Op).isFI())
+ return true;
+ return Op + X86::AddrSegmentReg <= MI.getNumOperands() &&
+ MI.getOperand(Op + X86::AddrBaseReg).isReg() &&
+ isScale(MI.getOperand(Op + X86::AddrScaleAmt)) &&
+ MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
+ (MI.getOperand(Op + X86::AddrDisp).isImm() ||
+ MI.getOperand(Op + X86::AddrDisp).isGlobal() ||
+ MI.getOperand(Op + X86::AddrDisp).isCPI() ||
+ MI.getOperand(Op + X86::AddrDisp).isJTI());
}
-inline static bool isMem(const MachineInstr *MI, unsigned Op) {
- if (MI->getOperand(Op).isFI()) return true;
- return Op+X86::AddrNumOperands <= MI->getNumOperands() &&
- MI->getOperand(Op+X86::AddrSegmentReg).isReg() &&
- isLeaMem(MI, Op);
+inline static bool isMem(const MachineInstr &MI, unsigned Op) {
+ if (MI.getOperand(Op).isFI())
+ return true;
+ return Op + X86::AddrNumOperands <= MI.getNumOperands() &&
+ MI.getOperand(Op + X86::AddrSegmentReg).isReg() && isLeaMem(MI, Op);
}
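Both predicates walk a fixed five-operand layout starting at Op: base register, scale immediate, index register, displacement, segment register, which together name the address Base + Scale*Index + Disp. A layout sketch with illustrative field names mirroring the X86::Addr* offsets:

#include <cstdint>
#include <cstdio>

struct MemRef {
  int64_t Base;    // Op + AddrBaseReg    (register value here, for the demo)
  int64_t Scale;   // Op + AddrScaleAmt   (must be 1, 2, 4 or 8, cf. isScale)
  int64_t Index;   // Op + AddrIndexReg
  int64_t Disp;    // Op + AddrDisp       (imm, global, CPI or JTI)
  int64_t Segment; // Op + AddrSegmentReg (checked by isMem, not by isLeaMem)
};

static int64_t effectiveAddress(const MemRef &M) {
  return M.Base + M.Scale * M.Index + M.Disp;
}

int main() {
  MemRef M{0x1000, 8, 3, 16, 0}; // like [rax + 8*rcx + 16]
  printf("EA = 0x%llx\n", (unsigned long long)effectiveAddress(M));
  return 0;
}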
class X86InstrInfo final : public X86GenInstrInfo {
@@ -146,7 +146,7 @@ class X86InstrInfo final : public X86GenInstrInfo {
/// RegOp2MemOpTable2, RegOp2MemOpTable3 - Load / store folding opcode maps.
///
typedef DenseMap<unsigned,
- std::pair<unsigned, unsigned> > RegOp2MemOpTableType;
+ std::pair<uint16_t, uint16_t> > RegOp2MemOpTableType;
RegOp2MemOpTableType RegOp2MemOpTable2Addr;
RegOp2MemOpTableType RegOp2MemOpTable0;
RegOp2MemOpTableType RegOp2MemOpTable1;
@@ -157,12 +157,12 @@ class X86InstrInfo final : public X86GenInstrInfo {
/// MemOp2RegOpTable - Load / store unfolding opcode map.
///
typedef DenseMap<unsigned,
- std::pair<unsigned, unsigned> > MemOp2RegOpTableType;
+ std::pair<uint16_t, uint16_t> > MemOp2RegOpTableType;
MemOp2RegOpTableType MemOp2RegOpTable;
static void AddTableEntry(RegOp2MemOpTableType &R2MTable,
MemOp2RegOpTableType &M2RTable,
- unsigned RegOp, unsigned MemOp, unsigned Flags);
+ uint16_t RegOp, uint16_t MemOp, uint16_t Flags);
virtual void anchor();
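Each table maps a register-form opcode to a (memory-form opcode, flags) pair; narrowing the pair from unsigned to uint16_t halves the per-entry payload since X86 opcodes fit in 16 bits. A minimal stand-in with std::unordered_map instead of DenseMap and invented opcode numbers:

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <utility>

using FoldTable = std::unordered_map<unsigned, std::pair<uint16_t, uint16_t>>;

int main() {
  FoldTable RegToMem;
  RegToMem[/*ADD32rr*/ 100] = {/*ADD32rm*/ 200, /*Flags*/ 0x1};
  auto It = RegToMem.find(100);
  if (It != RegToMem.end())
    printf("fold to opcode %u (flags 0x%x)\n",
           (unsigned)It->second.first, (unsigned)It->second.second);
  return 0;
}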
@@ -184,7 +184,7 @@ public:
/// getSPAdjust - This returns the stack pointer adjustment made by
/// this instruction. For x86, we need to handle more complex call
/// sequences involving PUSHes.
- int getSPAdjust(const MachineInstr *MI) const override;
+ int getSPAdjust(const MachineInstr &MI) const override;
/// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
/// extension instruction. That is, it's like a copy where it's legal for the
@@ -196,27 +196,27 @@ public:
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const override;
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
/// stack locations as well. This uses a heuristic so it isn't
/// reliable for correctness.
- unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
+ unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
/// stack locations as well. This uses a heuristic so it isn't
/// reliable for correctness.
- unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
+ unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
- bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
+ bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
+ const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const override;
/// Given an operand within a MachineInstr, insert preceding code to put it
@@ -227,10 +227,10 @@ public:
///
/// Reference parameters are set to indicate how caller should add this
/// operand to the LEA instruction.
- bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
- unsigned LEAOpcode, bool AllowSP,
- unsigned &NewSrc, bool &isKill,
- bool &isUndef, MachineOperand &ImplicitOp) const;
+ bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
+ unsigned LEAOpcode, bool AllowSP, unsigned &NewSrc,
+ bool &isKill, bool &isUndef,
+ MachineOperand &ImplicitOp) const;
/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
@@ -243,7 +243,7 @@ public:
/// performed; otherwise it returns the new instruction.
///
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
+ MachineInstr &MI,
LiveVariables *LV) const override;
/// Returns true iff the routine could find two commutable operands in the
@@ -261,7 +261,7 @@ public:
/// findCommutedOpIndices(MI, Op1, Op2);
/// can be interpreted as a query asking to find an operand that would be
/// commutable with operand #1.
- bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const override;
/// Returns true if the routine could find two commutable operands
@@ -286,8 +286,7 @@ public:
/// FMA213 #1, #2, #3
/// results into instruction with adjusted opcode:
/// FMA231 #3, #2, #1
- bool findFMA3CommutedOpIndices(MachineInstr *MI,
- unsigned &SrcOpIdx1,
+ bool findFMA3CommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
/// Returns an adjusted FMA opcode that must be used in FMA instruction that
@@ -300,37 +299,35 @@ public:
/// FMA213 #1, #2, #3
/// results into instruction with adjusted opcode:
/// FMA231 #3, #2, #1
- unsigned getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
- unsigned SrcOpIdx1,
+ unsigned getFMA3OpcodeToCommuteOperands(MachineInstr &MI, unsigned SrcOpIdx1,
unsigned SrcOpIdx2) const;
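The digit suffix names which source fills each role (213 computes src2*src1 + src3, 231 computes src2*src3 + src1), so commuting operands #1 and #3 amounts to renaming the opcode. An algebraic check of the identity quoted in the comment, independent of the real opcode tables:

#include <cassert>

static double fma213(double s1, double s2, double s3) { return s2 * s1 + s3; }
static double fma231(double s1, double s2, double s3) { return s2 * s3 + s1; }

int main() {
  double a = 1.5, b = -2.0, c = 4.25;
  assert(fma213(a, b, c) == fma231(c, b, a)); // commuting #1 and #3
  return 0;
}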
// Branch analysis.
- bool isUnpredicatedTerminator(const MachineInstr* MI) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool isUnpredicatedTerminator(const MachineInstr &MI) const override;
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
- bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
+ bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
+ int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
- bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
+ bool analyzeBranchPredicate(MachineBasicBlock &MBB,
TargetInstrInfo::MachineBranchPredicate &MBP,
bool AllowModify = false) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
bool canInsertSelect(const MachineBasicBlock&, ArrayRef<MachineOperand> Cond,
unsigned, unsigned, int&, int&, int&) const override;
- void insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DstReg, ArrayRef<MachineOperand> Cond,
- unsigned TrueReg, unsigned FalseReg) const override;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DstReg,
+ ArrayRef<MachineOperand> Cond, unsigned TrueReg,
+ unsigned FalseReg) const override;
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
@@ -358,7 +355,7 @@ public:
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
@@ -366,25 +363,27 @@ public:
/// folding and return true; otherwise it should return false. If it folds
/// the instruction, it is likely that the MachineInstr the iterator
/// references has been changed.
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt,
- int FrameIndex) const override;
+ MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
+ ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex,
+ LiveIntervals *LIS = nullptr) const override;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt,
- MachineInstr *LoadMI) const override;
+ MachineInstr *foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+ LiveIntervals *LIS = nullptr) const override;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
- bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const override;
+ bool
+ unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
+ bool UnfoldLoad, bool UnfoldStore,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const override;
bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const override;
@@ -419,8 +418,8 @@ public:
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const override;
- bool shouldScheduleAdjacent(MachineInstr* First,
- MachineInstr *Second) const override;
+ bool shouldScheduleAdjacent(MachineInstr &First,
+ MachineInstr &Second) const override;
void getNoopForMachoTarget(MCInst &NopInst) const override;
@@ -440,7 +439,7 @@ public:
/// True if MI has a condition code def, e.g. EFLAGS, that is
/// not marked dead.
- bool hasLiveCondCodeDef(MachineInstr *MI) const;
+ bool hasLiveCondCodeDef(MachineInstr &MI) const;
/// getGlobalBaseReg - Return a virtual register initialized with the
/// global base register value. Output instructions required to
@@ -449,19 +448,19 @@ public:
unsigned getGlobalBaseReg(MachineFunction *MF) const;
std::pair<uint16_t, uint16_t>
- getExecutionDomain(const MachineInstr *MI) const override;
+ getExecutionDomain(const MachineInstr &MI) const override;
- void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
+ void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override;
unsigned
- getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const override;
- unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
+ getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const override;
+ unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const override;
- void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+ void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const override;
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
unsigned OpNum,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
@@ -480,10 +479,10 @@ public:
bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
unsigned UseIdx) const override;
-
+
bool useMachineCombiner() const override {
return true;
}
@@ -501,14 +500,14 @@ public:
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
- bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const override;
/// optimizeCompareInstr - Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
- bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
@@ -519,7 +518,7 @@ public:
/// defined by the load we are trying to fold. DefMI returns the machine
/// instruction that defines FoldAsLoadDefReg, and the function returns
/// the machine instruction generated due to folding.
- MachineInstr* optimizeLoadInstr(MachineInstr *MI,
+ MachineInstr *optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const override;
@@ -542,19 +541,19 @@ protected:
/// non-commutable operands.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
- MachineInstr *commuteInstructionImpl(MachineInstr *MI, bool NewMI,
+ MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned CommuteOpIdx1,
unsigned CommuteOpIdx2) const override;
private:
- MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,
- MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const;
+ MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc,
+ MachineFunction::iterator &MFI,
+ MachineInstr &MI,
+ LiveVariables *LV) const;
/// Handles memory folding for special case instructions, for instance those
/// requiring custom manipulation of the address.
- MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr *MI,
+ MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr &MI,
unsigned OpNum,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
@@ -562,8 +561,11 @@ private:
/// isFrameOperand - Return true and the FrameIndex if the specified
/// operand and the following operands form a reference to the stack frame.
- bool isFrameOperand(const MachineInstr *MI, unsigned int Op,
+ bool isFrameOperand(const MachineInstr &MI, unsigned int Op,
int &FrameIndex) const;
+
+ /// Expand the MOVImmSExti8 pseudo-instructions.
+ bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB) const;
};
} // End llvm namespace
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 9c8339a841c90..b19a8f3306aaa 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -71,10 +71,18 @@ def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;
def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
SDTCisVT<2, i8>]>;
def SDTX86caspair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
+def SDTX86caspairSaveEbx8 : SDTypeProfile<1, 3,
+ [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
+ SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
+def SDTX86caspairSaveRbx16 : SDTypeProfile<1, 3,
+ [SDTCisVT<0, i64>, SDTCisPtrTy<1>,
+ SDTCisVT<2, i64>, SDTCisVT<3, i64>]>;
-def SDTX86atomicBinary : SDTypeProfile<2, 3, [SDTCisInt<0>, SDTCisInt<1>,
- SDTCisPtrTy<2>, SDTCisInt<3>,SDTCisInt<4>]>;
-def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i16>]>;
+def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
+ SDTCisPtrTy<1>,
+ SDTCisInt<2>]>;
+
+def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
@@ -104,6 +112,8 @@ def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_X86WIN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
+
def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
@@ -116,10 +126,6 @@ def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
[SDNPHasChain,SDNPSideEffect]>;
def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
[SDNPHasChain]>;
-def X86SFence : SDNode<"X86ISD::SFENCE", SDT_X86MEMBARRIER,
- [SDNPHasChain]>;
-def X86LFence : SDNode<"X86ISD::LFENCE", SDT_X86MEMBARRIER,
- [SDNPHasChain]>;
def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
@@ -153,6 +159,14 @@ def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86caspair,
def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
+def X86cas8save_ebx : SDNode<"X86ISD::LCMPXCHG8_SAVE_EBX_DAG",
+ SDTX86caspairSaveEbx8,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
+ SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def X86cas16save_rbx : SDNode<"X86ISD::LCMPXCHG16_SAVE_RBX_DAG",
+ SDTX86caspairSaveRbx16,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
+ SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
@@ -214,6 +228,9 @@ def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
[SDNPHasChain, SDNPSideEffect]>;
+def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
+ SDTypeProfile<0, 0, []>,
+ [SDNPHasChain, SDNPSideEffect]>;
def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
@@ -237,12 +254,28 @@ def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
+def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+
def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
-def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDTX86Void,
- [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
+def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDT_X86WIN_ALLOCA,
+ [SDNPHasChain, SDNPOutGlue]>;
def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
[SDNPHasChain]>;
@@ -263,7 +296,7 @@ def ptr_rc_nosp : PointerLikeRegClass<1>;
def X86MemAsmOperand : AsmOperandClass {
let Name = "Mem";
}
-let RenderMethod = "addMemOperands" in {
+let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
@@ -273,16 +306,19 @@ let RenderMethod = "addMemOperands" in {
def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
// Gather mem operands
- def X86MemVX32Operand : AsmOperandClass { let Name = "MemVX32"; }
- def X86MemVY32Operand : AsmOperandClass { let Name = "MemVY32"; }
- def X86MemVZ32Operand : AsmOperandClass { let Name = "MemVZ32"; }
- def X86MemVX64Operand : AsmOperandClass { let Name = "MemVX64"; }
- def X86MemVY64Operand : AsmOperandClass { let Name = "MemVY64"; }
- def X86MemVZ64Operand : AsmOperandClass { let Name = "MemVZ64"; }
- def X86MemVX32XOperand : AsmOperandClass { let Name = "MemVX32X"; }
- def X86MemVY32XOperand : AsmOperandClass { let Name = "MemVY32X"; }
- def X86MemVX64XOperand : AsmOperandClass { let Name = "MemVX64X"; }
- def X86MemVY64XOperand : AsmOperandClass { let Name = "MemVY64X"; }
+ def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
+ def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
+ def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
+ def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
+ def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }
+
+ def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
+ def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
+ def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
+ def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
+ def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
+ def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
+ def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }
}
def X86AbsMemAsmOperand : AsmOperandClass {
@@ -293,7 +329,7 @@ def X86AbsMemAsmOperand : AsmOperandClass {
class X86MemOperand<string printMethod,
AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
let PrintMethod = printMethod;
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
let ParserMatchClass = parserMatchClass;
let OperandType = "OPERAND_MEMORY";
}
@@ -302,7 +338,7 @@ class X86MemOperand<string printMethod,
class X86VMemOperand<RegisterClass RC, string printMethod,
AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
- let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
}
def anymem : X86MemOperand<"printanymem">;
@@ -329,17 +365,19 @@ def f512mem : X86MemOperand<"printf512mem", X86Mem512AsmOperand>;
def v512mem : X86VMemOperand<VR512, "printf512mem", X86Mem512AsmOperand>;
// Gather mem operands
-def vx32mem : X86VMemOperand<VR128, "printi32mem", X86MemVX32Operand>;
-def vy32mem : X86VMemOperand<VR256, "printi32mem", X86MemVY32Operand>;
-def vx64mem : X86VMemOperand<VR128, "printi64mem", X86MemVX64Operand>;
-def vy64mem : X86VMemOperand<VR256, "printi64mem", X86MemVY64Operand>;
-
-def vx32xmem : X86VMemOperand<VR128X, "printi32mem", X86MemVX32XOperand>;
-def vx64xmem : X86VMemOperand<VR128X, "printi32mem", X86MemVX64XOperand>;
-def vy32xmem : X86VMemOperand<VR256X, "printi32mem", X86MemVY32XOperand>;
-def vy64xmem : X86VMemOperand<VR256X, "printi64mem", X86MemVY64XOperand>;
-def vz32mem : X86VMemOperand<VR512, "printi32mem", X86MemVZ32Operand>;
-def vz64mem : X86VMemOperand<VR512, "printi64mem", X86MemVZ64Operand>;
+def vx64mem : X86VMemOperand<VR128, "printi64mem", X86Mem64_RC128Operand>;
+def vx128mem : X86VMemOperand<VR128, "printi128mem", X86Mem128_RC128Operand>;
+def vx256mem : X86VMemOperand<VR128, "printi256mem", X86Mem256_RC128Operand>;
+def vy128mem : X86VMemOperand<VR256, "printi128mem", X86Mem128_RC256Operand>;
+def vy256mem : X86VMemOperand<VR256, "printi256mem", X86Mem256_RC256Operand>;
+
+def vx64xmem : X86VMemOperand<VR128X, "printi64mem", X86Mem64_RC128XOperand>;
+def vx128xmem : X86VMemOperand<VR128X, "printi128mem", X86Mem128_RC128XOperand>;
+def vx256xmem : X86VMemOperand<VR128X, "printi256mem", X86Mem256_RC128XOperand>;
+def vy128xmem : X86VMemOperand<VR256, "printi128mem", X86Mem128_RC256XOperand>;
+def vy256xmem : X86VMemOperand<VR256X, "printi256mem", X86Mem256_RC256XOperand>;
+def vy512mem : X86VMemOperand<VR256X, "printi512mem", X86Mem512_RC256XOperand>;
+def vz512mem : X86VMemOperand<VR512, "printi512mem", X86Mem512_RC512Operand>;
// A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
// of a plain GPR, so that it doesn't potentially require a REX prefix.
@@ -348,7 +386,8 @@ def ptr_rc_norex_nosp : PointerLikeRegClass<3>;
def i8mem_NOREX : Operand<iPTR> {
let PrintMethod = "printi8mem";
- let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
+ SEGMENT_REG);
let ParserMatchClass = X86Mem8AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -363,7 +402,7 @@ def ptr_rc_tailcall : PointerLikeRegClass<4>;
def i32mem_TC : Operand<i32> {
let PrintMethod = "printi32mem";
let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
- i32imm, i8imm);
+ i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem32AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -374,7 +413,7 @@ def i32mem_TC : Operand<i32> {
def i64mem_TC : Operand<i64> {
let PrintMethod = "printi64mem";
let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
- ptr_rc_tailcall, i32imm, i8imm);
+ ptr_rc_tailcall, i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem64AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -494,7 +533,7 @@ let RenderMethod = "addMemOffsOperands" in {
class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
- let MIOperandInfo = (ops ptr_rc, i8imm);
+ let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
}
class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
@@ -514,7 +553,7 @@ def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;
class X86MemOffsOperand<Operand immOperand, string printMethod,
AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
- let MIOperandInfo = (ops immOperand, i8imm);
+ let MIOperandInfo = (ops immOperand, SEGMENT_REG);
}
def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8",
@@ -681,14 +720,14 @@ def i64i32imm_pcrel : Operand<i64> {
def lea64_32mem : Operand<i32> {
let PrintMethod = "printanymem";
- let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm);
+ let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
let ParserMatchClass = X86MemAsmOperand;
}
// Memory operands that use 64-bit pointers in both ILP32 and LP64.
def lea64mem : Operand<i64> {
let PrintMethod = "printanymem";
- let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm);
+ let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
let ParserMatchClass = X86MemAsmOperand;
}
@@ -728,6 +767,8 @@ def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
+def TruePredicate : Predicate<"true">;
+
def HasCMov : Predicate<"Subtarget->hasCMov()">;
def NoCMov : Predicate<"!Subtarget->hasCMov()">;
@@ -773,7 +814,7 @@ def HasVLX : Predicate<"Subtarget->hasVLX()">,
def NoVLX : Predicate<"!Subtarget->hasVLX()">;
def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
-def PKU : Predicate<"!Subtarget->hasPKU()">;
+def PKU : Predicate<"Subtarget->hasPKU()">;
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
def HasAES : Predicate<"Subtarget->hasAES()">;
@@ -795,6 +836,10 @@ def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
def HasBMI : Predicate<"Subtarget->hasBMI()">;
def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
+def HasVBMI : Predicate<"Subtarget->hasVBMI()">,
+ AssemblerPredicate<"FeatureVBMI", "AVX-512 VBMI ISA">;
+def HasIFMA : Predicate<"Subtarget->hasIFMA()">,
+ AssemblerPredicate<"FeatureIFMA", "AVX-512 IFMA ISA">;
def HasRTM : Predicate<"Subtarget->hasRTM()">;
def HasHLE : Predicate<"Subtarget->hasHLE()">;
def HasTSX : Predicate<"Subtarget->hasRTM() || Subtarget->hasHLE()">;
@@ -804,6 +849,7 @@ def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">;
def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">;
def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
+def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
def HasMPX : Predicate<"Subtarget->hasMPX()">;
@@ -822,6 +868,8 @@ def In32BitMode : Predicate<"Subtarget->is32Bit()">,
AssemblerPredicate<"Mode32Bit", "32-bit mode">;
def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
+def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
+ "Subtarget->getFrameLowering()->hasFP(*MF)">;
def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
@@ -832,15 +880,16 @@ def FarData : Predicate<"TM.getCodeModel() != CodeModel::Small &&"
"TM.getCodeModel() != CodeModel::Kernel">;
def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
"TM.getCodeModel() == CodeModel::Kernel">;
-def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
-def IsNotPIC : Predicate<"TM.getRelocationModel() != Reloc::PIC_">;
+def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
def OptForSize : Predicate<"OptForSize">;
+def OptForMinSize : Predicate<"OptForMinSize">;
def OptForSpeed : Predicate<"!OptForSize">;
def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">;
-def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">;
+def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
def FavorMemIndirectCall : Predicate<"!Subtarget->callRegIndirect()">;
def NotSlowIncDec : Predicate<"!Subtarget->slowIncDec()">;
def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
+def HasMFence : Predicate<"Subtarget->hasMFence()">;
//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
@@ -871,12 +920,6 @@ def X86_COND_O : PatLeaf<(i8 13)>;
def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE
def X86_COND_S : PatLeaf<(i8 15)>;
-// Predicate used to help when pattern matching LZCNT/TZCNT.
-def X86_COND_E_OR_NE : ImmLeaf<i8, [{
- return (Imm == X86::COND_E) || (Imm == X86::COND_NE);
-}]>;
-
-
def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
@@ -1042,6 +1085,10 @@ def LEAVE64 : I<0xC9, RawFrm,
// Miscellaneous Instructions.
//
+let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1 in
+ def Int_eh_sjlj_setup_dispatch
+ : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
+
let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
let mayLoad = 1, SchedRW = [WriteLoad] in {
def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
@@ -1092,12 +1139,12 @@ def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[],
let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
SchedRW = [WriteRMW], Defs = [ESP] in {
- let Uses = [ESP, EFLAGS] in
+ let Uses = [ESP] in
def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
[(set GR32:$dst, (int_x86_flags_read_u32))]>,
Requires<[Not64BitMode]>;
- let Uses = [RSP, EFLAGS] in
+ let Uses = [RSP] in
def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
[(set GR64:$dst, (int_x86_flags_read_u64))]>,
Requires<[In64BitMode]>;
@@ -1253,28 +1300,28 @@ def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
let SchedRW = [WriteMicrocoded] in {
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in {
-def MOVSB : I<0xA4, RawFrmDstSrc, (outs dstidx8:$dst), (ins srcidx8:$src),
+def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
"movsb\t{$src, $dst|$dst, $src}", [], IIC_MOVS>;
-def MOVSW : I<0xA5, RawFrmDstSrc, (outs dstidx16:$dst), (ins srcidx16:$src),
+def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
"movsw\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize16;
-def MOVSL : I<0xA5, RawFrmDstSrc, (outs dstidx32:$dst), (ins srcidx32:$src),
+def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
"movs{l|d}\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize32;
-def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs dstidx64:$dst), (ins srcidx64:$src),
+def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
"movsq\t{$src, $dst|$dst, $src}", [], IIC_MOVS>;
}
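A software model of what the DF dependence means for one MOVSB step: copy a byte, then move both index registers by the element size, up when DF=0 (CLD) and down when DF=1 (STD). A sketch, not the hardware definition:

#include <cstdio>

// Copy one byte, then step SI/DI by +1 or -1 depending on the direction flag.
static void movsb(const char *&SI, char *&DI, bool DF) {
  *DI = *SI;
  int Step = DF ? -1 : 1;
  SI += Step;
  DI += Step;
}

int main() {
  const char Src[] = "abc";
  char Dst[4] = {0};
  const char *SI = Src;
  char *DI = Dst;
  for (int i = 0; i < 3; ++i)
    movsb(SI, DI, /*DF=*/false); // CLD direction: ascending copy
  printf("%s\n", Dst);           // prints: abc
  return 0;
}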
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI], Uses = [AL,EDI,EFLAGS] in
-def STOSB : I<0xAA, RawFrmDst, (outs dstidx8:$dst), (ins),
+def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
"stosb\t{%al, $dst|$dst, al}", [], IIC_STOS>;
let Defs = [EDI], Uses = [AX,EDI,EFLAGS] in
-def STOSW : I<0xAB, RawFrmDst, (outs dstidx16:$dst), (ins),
+def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
"stosw\t{%ax, $dst|$dst, ax}", [], IIC_STOS>, OpSize16;
let Defs = [EDI], Uses = [EAX,EDI,EFLAGS] in
-def STOSL : I<0xAB, RawFrmDst, (outs dstidx32:$dst), (ins),
+def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
"stos{l|d}\t{%eax, $dst|$dst, eax}", [], IIC_STOS>, OpSize32;
let Defs = [RDI], Uses = [RAX,RDI,EFLAGS] in
-def STOSQ : RI<0xAB, RawFrmDst, (outs dstidx64:$dst), (ins),
+def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
"stosq\t{%rax, $dst|$dst, rax}", [], IIC_STOS>;
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
@@ -1402,30 +1449,30 @@ def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
}
let mayStore = 1 in {
let Uses = [AL] in
-def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs offset32_8:$dst), (ins),
+def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
"mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize32;
let Uses = [AX] in
-def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs offset32_16:$dst), (ins),
+def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
"mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
OpSize16, AdSize32;
let Uses = [EAX] in
-def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs offset32_32:$dst), (ins),
+def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
"mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
OpSize32, AdSize32;
let Uses = [RAX] in
-def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs offset32_64:$dst), (ins),
+def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
"mov{q}\t{%rax, $dst|$dst, rax}", [], IIC_MOV_MEM>,
AdSize32;
let Uses = [AL] in
-def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs offset16_8:$dst), (ins),
+def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
"mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize16;
let Uses = [AX] in
-def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs offset16_16:$dst), (ins),
+def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
"mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
OpSize16, AdSize16;
let Uses = [EAX] in
-def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs offset16_32:$dst), (ins),
+def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
"mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
OpSize32, AdSize16;
}
@@ -1451,17 +1498,17 @@ def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
let mayStore = 1 in {
let Uses = [AL] in
-def MOV8o64a : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs offset64_8:$dst), (ins),
+def MOV8o64a : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
"movabs{b}\t{%al, $dst|$dst, al}", []>, AdSize64;
let Uses = [AX] in
-def MOV16o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_16:$dst), (ins),
+def MOV16o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
"movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize16, AdSize64;
let Uses = [EAX] in
-def MOV32o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_32:$dst), (ins),
+def MOV32o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
"movabs{l}\t{%eax, $dst|$dst, eax}", []>, OpSize32,
AdSize64;
let Uses = [RAX] in
-def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs offset64_64:$dst), (ins),
+def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
"movabs{q}\t{%rax, $dst|$dst, rax}", []>, AdSize64;
}
} // hasSideEffects = 0
@@ -1951,11 +1998,11 @@ def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
// These use the DF flag in the EFLAGS register to inc or dec EDI
let Defs = [EDI], Uses = [DX,EDI,EFLAGS] in {
-def INSB : I<0x6C, RawFrmDst, (outs dstidx8:$dst), (ins),
+def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
"insb\t{%dx, $dst|$dst, dx}", [], IIC_INS>;
-def INSW : I<0x6D, RawFrmDst, (outs dstidx16:$dst), (ins),
+def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
"insw\t{%dx, $dst|$dst, dx}", [], IIC_INS>, OpSize16;
-def INSL : I<0x6D, RawFrmDst, (outs dstidx32:$dst), (ins),
+def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
"ins{l|d}\t{%dx, $dst|$dst, dx}", [], IIC_INS>, OpSize32;
}
}
@@ -2124,46 +2171,6 @@ let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
(implicit EFLAGS)]>, XS;
}
-let Predicates = [HasLZCNT] in {
- def : Pat<(X86cmov (ctlz GR16:$src), (i16 16), (X86_COND_E_OR_NE),
- (X86cmp GR16:$src, (i16 0))),
- (LZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (ctlz GR32:$src), (i32 32), (X86_COND_E_OR_NE),
- (X86cmp GR32:$src, (i32 0))),
- (LZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (ctlz GR64:$src), (i64 64), (X86_COND_E_OR_NE),
- (X86cmp GR64:$src, (i64 0))),
- (LZCNT64rr GR64:$src)>;
- def : Pat<(X86cmov (i16 16), (ctlz GR16:$src), (X86_COND_E_OR_NE),
- (X86cmp GR16:$src, (i16 0))),
- (LZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (i32 32), (ctlz GR32:$src), (X86_COND_E_OR_NE),
- (X86cmp GR32:$src, (i32 0))),
- (LZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (i64 64), (ctlz GR64:$src), (X86_COND_E_OR_NE),
- (X86cmp GR64:$src, (i64 0))),
- (LZCNT64rr GR64:$src)>;
-
- def : Pat<(X86cmov (ctlz (loadi16 addr:$src)), (i16 16), (X86_COND_E_OR_NE),
- (X86cmp (loadi16 addr:$src), (i16 0))),
- (LZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (ctlz (loadi32 addr:$src)), (i32 32), (X86_COND_E_OR_NE),
- (X86cmp (loadi32 addr:$src), (i32 0))),
- (LZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (ctlz (loadi64 addr:$src)), (i64 64), (X86_COND_E_OR_NE),
- (X86cmp (loadi64 addr:$src), (i64 0))),
- (LZCNT64rm addr:$src)>;
- def : Pat<(X86cmov (i16 16), (ctlz (loadi16 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi16 addr:$src), (i16 0))),
- (LZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (i32 32), (ctlz (loadi32 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi32 addr:$src), (i32 0))),
- (LZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (i64 64), (ctlz (loadi64 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi64 addr:$src), (i64 0))),
- (LZCNT64rm addr:$src)>;
-}
-
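
The deleted patterns folded the guarded-ctlz idiom into a bare LZCNT, which is defined to return the operand width for a zero input; that is also why the X86_COND_E_OR_NE ImmLeaf was dropped earlier in this patch. Presumably the fold now happens before instruction selection. A worked example of the idiom, assuming -mlzcnt:

  // The zero check is redundant with LZCNT available: lzcnt of 0 is
  // defined as 32, so the whole select should collapse to one lzcntl.
  unsigned clz32(unsigned x) {
    return x == 0 ? 32u : (unsigned)__builtin_clz(x);
  }
  // The cttz analogue (x == 0 ? 32 : __builtin_ctz(x)) folds to
  // tzcntl under -mbmi the same way; its patterns are removed below.
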
//===----------------------------------------------------------------------===//
// BMI Instructions
//
@@ -2240,46 +2247,6 @@ let Predicates = [HasBMI] in {
(BLSI64rr GR64:$src)>;
}
-let Predicates = [HasBMI] in {
- def : Pat<(X86cmov (cttz GR16:$src), (i16 16), (X86_COND_E_OR_NE),
- (X86cmp GR16:$src, (i16 0))),
- (TZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (cttz GR32:$src), (i32 32), (X86_COND_E_OR_NE),
- (X86cmp GR32:$src, (i32 0))),
- (TZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (cttz GR64:$src), (i64 64), (X86_COND_E_OR_NE),
- (X86cmp GR64:$src, (i64 0))),
- (TZCNT64rr GR64:$src)>;
- def : Pat<(X86cmov (i16 16), (cttz GR16:$src), (X86_COND_E_OR_NE),
- (X86cmp GR16:$src, (i16 0))),
- (TZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (i32 32), (cttz GR32:$src), (X86_COND_E_OR_NE),
- (X86cmp GR32:$src, (i32 0))),
- (TZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (i64 64), (cttz GR64:$src), (X86_COND_E_OR_NE),
- (X86cmp GR64:$src, (i64 0))),
- (TZCNT64rr GR64:$src)>;
-
- def : Pat<(X86cmov (cttz (loadi16 addr:$src)), (i16 16), (X86_COND_E_OR_NE),
- (X86cmp (loadi16 addr:$src), (i16 0))),
- (TZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (cttz (loadi32 addr:$src)), (i32 32), (X86_COND_E_OR_NE),
- (X86cmp (loadi32 addr:$src), (i32 0))),
- (TZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (cttz (loadi64 addr:$src)), (i64 64), (X86_COND_E_OR_NE),
- (X86cmp (loadi64 addr:$src), (i64 0))),
- (TZCNT64rm addr:$src)>;
- def : Pat<(X86cmov (i16 16), (cttz (loadi16 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi16 addr:$src), (i16 0))),
- (TZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (i32 32), (cttz (loadi32 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi32 addr:$src), (i32 0))),
- (TZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (i64 64), (cttz (loadi64 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi64 addr:$src), (i64 0))),
- (TZCNT64rm addr:$src)>;
-}
-
multiclass bmi_bextr_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
X86MemOperand x86memop, Intrinsic Int,
@@ -2440,22 +2407,34 @@ defm TZMSK : tbm_binary_intr<0x01, "tzmsk", MRM4r, MRM4m>;
//===----------------------------------------------------------------------===//
// MONITORX/MWAITX Instructions
//
-let SchedRW = [WriteSystem] in {
-let Uses = [EAX, ECX, EDX] in
-def MONITORXrrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", [],
- IIC_SSE_MONITOR>, TB;
-let Uses = [ECX, EAX, EBX] in
-def MWAITXrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx", [], IIC_SSE_MWAIT>,
- TB;
+let SchedRW = [ WriteSystem ] in {
+ let usesCustomInserter = 1 in {
+ def MONITORX : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
+ [(int_x86_monitorx addr:$src1, GR32:$src2, GR32:$src3)]>,
+ Requires<[ HasMWAITX ]>;
+ }
+
+ let Uses = [ EAX, ECX, EDX ] in {
+ def MONITORXrrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", [], IIC_SSE_MONITORX>,
+ TB, Requires<[ HasMWAITX ]>;
+ }
+
+ let Uses = [ ECX, EAX, EBX ] in {
+ def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
+ [(int_x86_mwaitx ECX, EAX, EBX)], IIC_SSE_MWAITX>,
+ TB, Requires<[ HasMWAITX ]>;
+ }
} // SchedRW
-def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrr)>, Requires<[Not64BitMode]>;
-def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrr)>, Requires<[In64BitMode]>;
+def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
+ Requires<[ Not64BitMode ]>;
+def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
+ Requires<[ In64BitMode ]>;
def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORXrrr)>,
- Requires<[Not64BitMode]>;
+ Requires<[ Not64BitMode ]>;
def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORXrrr)>,
- Requires<[In64BitMode]>;
+ Requires<[ In64BitMode ]>;
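
A usage sketch for the new MONITORX/MWAITX definitions, assuming clang's _mm_monitorx/_mm_mwaitx wrappers from mwaitxintrin.h, which lower to the int_x86_monitorx/int_x86_mwaitx intrinsics matched above (compile with -mmwaitx):

  #include <x86intrin.h>

  // Arm the monitor on the flag's cache line, then wait; the
  // extension/hint/timeout words travel in the registers named by
  // the Uses lists above (zeros request default behavior).
  static void wait_on(volatile int *flag) {
    _mm_monitorx((void *)flag, 0, 0);
    if (*flag == 0)
      _mm_mwaitx(0, 0, 0);
  }
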
//===----------------------------------------------------------------------===//
// CLZERO Instruction
@@ -2535,7 +2514,7 @@ let Predicates = [HasTBM] in {
//
def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
- "clflushopt\t$src", []>, PD;
+ "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;
def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src", []>, PD;
def PCOMMIT : I<0xAE, MRM_F8, (outs), (ins), "pcommit", []>, PD;
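
With the pattern added above, the user-level intrinsic now selects CLFLUSHOPT directly. A minimal sketch, assuming the _mm_clflushopt wrapper and -mclflushopt:

  #include <immintrin.h>

  // Flushes the cache line containing p via int_x86_clflushopt.
  void flush_line(void *p) {
    _mm_clflushopt(p);
  }
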
@@ -2781,6 +2760,11 @@ def : InstAlias<"lods\t{$src, %al|al, $src}", (LODSB srcidx8:$src), 0>;
def : InstAlias<"lods\t{$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"lods\t$src", (LODSB srcidx8:$src), 0>;
+def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0>;
+def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0>;
+def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
+
// stos aliases. Accept the source being omitted because it's implicit in
// the mnemonic, or the mnemonic suffix being omitted because it's implicit
@@ -2793,6 +2777,11 @@ def : InstAlias<"stos\t{%al, $dst|$dst, al}", (STOSB dstidx8:$dst), 0>;
def : InstAlias<"stos\t{%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst), 0>;
+def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0>;
+def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0>;
+def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+
// scas aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
@@ -2805,6 +2794,24 @@ def : InstAlias<"scas\t{$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
def : InstAlias<"scas\t{$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst), 0>;
+def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0>;
+def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0>;
+def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+
+// cmps aliases. Accept the mnemonic suffix being omitted because it's
+// implicit in the destination.
+def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst, srcidx8:$src), 0>;
+def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0>;
+def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0>;
+def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0>, Requires<[In64BitMode]>;
+
+// movs aliases. Accept the mnemonic suffix being omitted because it's
+// implicit in the destination.
+def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst, srcidx8:$src), 0>;
+def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0>;
+def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0>;
+def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0>, Requires<[In64BitMode]>;
// div and idiv aliases for explicit A register.
def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
@@ -2892,8 +2899,8 @@ def : InstAlias<"fnstsw" , (FNSTSW16r)>;
// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
// this is compatible with what GAS does.
-def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
-def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
+def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"lcall\t{*}$dst", (FARCALL32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst", (FARJMP32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
@@ -2917,6 +2924,18 @@ def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i3
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
+// ins aliases. Accept the mnemonic suffix being omitted because it's implicit
+// in the destination.
+def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst), 0>;
+def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0>;
+def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0>;
+
+// outs aliases. Accept the mnemonic suffix being omitted because it's implicit
+// in the source.
+def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src), 0>;
+def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0>;
+def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0>;
+
// inb %dx -> inb %al, %dx
def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
@@ -2929,12 +2948,12 @@ def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
// jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
def : InstAlias<"call\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"jmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
-def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not16BitMode]>;
-def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not16BitMode]>;
-def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>;
-def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>;
-def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
-def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
+def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
+def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
+def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
+def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
+def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
+def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
// Force mov without a suffix with a segment and mem to prefer the 'l' form of
// the move. All segment/mem forms are equivalent, this has the shortest
diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td
index 83f9b1409f615..8d70691714ddb 100644
--- a/lib/Target/X86/X86InstrMMX.td
+++ b/lib/Target/X86/X86InstrMMX.td
@@ -282,7 +282,7 @@ def MMX_MOVQ64rr_REV : MMXI<0x7F, MRMDestReg, (outs VR64:$dst), (ins VR64:$src),
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def MMX_MOVD64from64rm : MMXRI<0x7E, MRMDestMem,
- (outs i64mem:$dst), (ins VR64:$src),
+ (outs), (ins i64mem:$dst, VR64:$src),
"movd\t{$src, $dst|$dst, $src}",
[], IIC_MMX_MOV_REG_MM>, Sched<[WriteStore]>;
diff --git a/lib/Target/X86/X86InstrMPX.td b/lib/Target/X86/X86InstrMPX.td
index 71ab97374dd63..309f601d1fcee 100644
--- a/lib/Target/X86/X86InstrMPX.td
+++ b/lib/Target/X86/X86InstrMPX.td
@@ -55,10 +55,10 @@ def BNDMOVRM64rm : RI<0x1A, MRMSrcMem, (outs BNDR:$dst), (ins i128mem:$src),
def BNDMOVMRrr : I<0x1B, MRMDestReg, (outs BNDR:$dst), (ins BNDR:$src),
"bndmov\t{$src, $dst|$dst, $src}", []>, PD,
Requires<[HasMPX]>;
-def BNDMOVMR32mr : I<0x1B, MRMDestMem, (outs i64mem:$dst), (ins BNDR:$src),
+def BNDMOVMR32mr : I<0x1B, MRMDestMem, (outs), (ins i64mem:$dst, BNDR:$src),
"bndmov\t{$src, $dst|$dst, $src}", []>, PD,
Requires<[HasMPX, Not64BitMode]>;
-def BNDMOVMR64mr : RI<0x1B, MRMDestMem, (outs i128mem:$dst), (ins BNDR:$src),
+def BNDMOVMR64mr : RI<0x1B, MRMDestMem, (outs), (ins i128mem:$dst, BNDR:$src),
"bndmov\t{$src, $dst|$dst, $src}", []>, PD,
Requires<[HasMPX, In64BitMode]>;
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 6a7c45665e9c1..9a98f5cac2ee1 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -382,75 +382,71 @@ def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion
-let Predicates = [HasSSE2] in {
- def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(f128 (bitconvert (i128 FR128:$src))), (f128 FR128:$src)>;
- def : Pat<(i128 (bitconvert (f128 FR128:$src))), (i128 FR128:$src)>;
-}
+def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(f128 (bitconvert (i128 FR128:$src))), (f128 FR128:$src)>;
+def : Pat<(i128 (bitconvert (f128 FR128:$src))), (i128 FR128:$src)>;
// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion
-let Predicates = [HasAVX] in {
- def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
-}
+def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
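
These Pat lines make same-width vector bitcasts free. At the C level they correspond to the cast intrinsics, which should compile to no instructions at all:

  #include <immintrin.h>

  // Pure bitcasts; each should cost zero instructions, which is
  // exactly what the bitconvert patterns above encode.
  __m128i as_i128(__m128 v)  { return _mm_castps_si128(v); }
  __m256  as_f256(__m256i v) { return _mm256_castsi256_ps(v); }
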
// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.
@@ -472,16 +468,13 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isPseudo = 1, SchedRW = [WriteZero] in {
+ isPseudo = 1, Predicates = [NoVLX], SchedRW = [WriteZero] in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
[(set VR128:$dst, (v4f32 immAllZerosV))]>;
}
-def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
+let Predicates = [NoVLX] in
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
-def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
-def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
-def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
// The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
@@ -489,39 +482,9 @@ def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
+ isPseudo = 1, Predicates = [HasAVX, NoVLX], SchedRW = [WriteZero] in {
def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
- [(set VR256:$dst, (v8f32 immAllZerosV))]>;
-}
-
-let Predicates = [HasAVX] in
- def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;
-
-let Predicates = [HasAVX2] in {
- def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
- def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
- def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
- def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
-}
-
-// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
-// VPXOR instruction writes zero to its upper part, it's safe build zeros.
-let Predicates = [HasAVX1Only] in {
-def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
-def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
- (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
-
-def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
-def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
- (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
-
-def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
-def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
- (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
-
-def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
-def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
- (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
+ [(set VR256:$dst, (v8i32 immAllZerosV))]>;
}
// We set canFoldAsLoad because this can be converted to a constant-pool
@@ -649,15 +612,14 @@ let Predicates = [UseAVX] in {
def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
(v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
+ def : Pat<(v4f64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
}
// Extract and store.
def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
addr:$dst),
(VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
- def : Pat<(store (f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
- addr:$dst),
- (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;
// Shuffle with VMOVSS
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
@@ -771,11 +733,6 @@ let Predicates = [UseSSE2] in {
(COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
}
- // Extract and store.
- def : Pat<(store (f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
- addr:$dst),
- (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;
-
// Shuffle with MOVSD
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
(MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
@@ -800,6 +757,13 @@ let Predicates = [UseSSE2] in {
(MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
+// Aliases to help the assembler pick two-byte VEX encodings by swapping the
+// operands relative to the normal instructions to use VEX.R instead of VEX.B.
+def : InstAlias<"vmovss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ (VMOVSSrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;
+def : InstAlias<"vmovsd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ (VMOVSDrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;
+
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//
@@ -937,10 +901,24 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}
-def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
- (VMOVUPSYmr addr:$dst, VR256:$src)>;
-def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
- (VMOVUPDYmr addr:$dst, VR256:$src)>;
+// Aliases to help the assembler pick two-byte VEX encodings by swapping the
+// operands relative to the normal instructions to use VEX.R instead of VEX.B.
+def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
+ (VMOVAPSrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
+ (VMOVAPDrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
+ (VMOVUPSrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
+ (VMOVUPDrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
+ (VMOVAPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
+def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
+ (VMOVAPDYrr_REV VR256L:$dst, VR256H:$src), 0>;
+def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
+ (VMOVUPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
+def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
+ (VMOVUPDYrr_REV VR256L:$dst, VR256H:$src), 0>;
let SchedRW = [WriteStore] in {
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
@@ -978,20 +956,6 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
IIC_SSE_MOVU_P_RR>;
}
-let Predicates = [HasAVX] in {
- def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
- (VMOVUPSmr addr:$dst, VR128:$src)>;
- def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
- (VMOVUPDmr addr:$dst, VR128:$src)>;
-}
-
-let Predicates = [UseSSE1] in
- def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
- (MOVUPSmr addr:$dst, VR128:$src)>;
-let Predicates = [UseSSE2] in
- def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
- (MOVUPDmr addr:$dst, VR128:$src)>;
-
// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX, NoVLX] in {
// 128-bit load/store
@@ -1004,18 +968,10 @@ let Predicates = [HasAVX, NoVLX] in {
(VMOVAPSmr addr:$dst, VR128:$src)>;
def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
(VMOVAPSmr addr:$dst, VR128:$src)>;
- def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
- (VMOVAPSmr addr:$dst, VR128:$src)>;
- def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
- (VMOVAPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (v2i64 VR128:$src), addr:$dst),
(VMOVUPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (v4i32 VR128:$src), addr:$dst),
(VMOVUPSmr addr:$dst, VR128:$src)>;
- def : Pat<(store (v8i16 VR128:$src), addr:$dst),
- (VMOVUPSmr addr:$dst, VR128:$src)>;
- def : Pat<(store (v16i8 VR128:$src), addr:$dst),
- (VMOVUPSmr addr:$dst, VR128:$src)>;
// 256-bit load/store
def : Pat<(alignedloadv4i64 addr:$src),
@@ -1026,18 +982,10 @@ let Predicates = [HasAVX, NoVLX] in {
(VMOVAPSYmr addr:$dst, VR256:$src)>;
def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
(VMOVAPSYmr addr:$dst, VR256:$src)>;
- def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
- (VMOVAPSYmr addr:$dst, VR256:$src)>;
- def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
- (VMOVAPSYmr addr:$dst, VR256:$src)>;
def : Pat<(store (v4i64 VR256:$src), addr:$dst),
(VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(store (v8i32 VR256:$src), addr:$dst),
(VMOVUPSYmr addr:$dst, VR256:$src)>;
- def : Pat<(store (v16i16 VR256:$src), addr:$dst),
- (VMOVUPSYmr addr:$dst, VR256:$src)>;
- def : Pat<(store (v32i8 VR256:$src), addr:$dst),
- (VMOVUPSYmr addr:$dst, VR256:$src)>;
// Special patterns for storing subvector extracts of lower 128-bits
// It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
@@ -1080,6 +1028,28 @@ let Predicates = [HasAVX, NoVLX] in {
(VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
}
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
+ // 128-bit load/store
+ def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
+ (VMOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
+ (VMOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v8i16 VR128:$src), addr:$dst),
+ (VMOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v16i8 VR128:$src), addr:$dst),
+ (VMOVUPSmr addr:$dst, VR128:$src)>;
+
+ // 256-bit load/store
+ def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
+ (VMOVAPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
+ (VMOVAPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(store (v16i16 VR256:$src), addr:$dst),
+ (VMOVUPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(store (v32i8 VR256:$src), addr:$dst),
+ (VMOVUPSYmr addr:$dst, VR256:$src)>;
+}
+
// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
@@ -2039,35 +2009,24 @@ def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvttps2dq VR128:$src))],
- IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
+ [], IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttps2dq
- (loadv4f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
+ [], IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
- IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
+ [], IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
- (loadv8f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
+ [], IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
Sched<[WriteCvtF2ILd]>;
def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
- IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
+ [], IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
+ [], IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
let Predicates = [HasAVX] in {
def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
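
The int_x86_sse2_cvttps2dq and int_x86_avx_cvtt_ps2dq_256 patterns are dropped from these defs; presumably the intrinsics now reach CVTTPS2DQ through the generic fp_to_sint lowering instead. The user-level view is unchanged:

  #include <immintrin.h>

  // Still selects cvttps2dq; a truncating C cast per lane compiles
  // the same way.
  __m128i trunc4(__m128 v) {
    return _mm_cvttps_epi32(v);
  }
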
@@ -2137,14 +2096,10 @@ def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
- IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
+ [], IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
+ [], IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
(VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
@@ -2170,30 +2125,24 @@ def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
let Predicates = [HasAVX] in {
// SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
+ "vcvtps2pd\t{$src, $dst|$dst, $src}",
+ [], IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
- IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
+ [], IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
+ [], IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
}
let Predicates = [UseSSE2] in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
+ "cvtps2pd\t{$src, $dst|$dst, $src}",
+ [], IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
@@ -2204,24 +2153,17 @@ def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
let Predicates = [HasAVX] in {
let hasSideEffects = 0, mayLoad = 1 in
def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- []>, VEX, Sched<[WriteCvtI2FLd]>;
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ []>, VEX, Sched<[WriteCvtI2FLd]>;
def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX,
- Sched<[WriteCvtI2F]>;
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ []>, VEX, Sched<[WriteCvtI2F]>;
def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvtdq2_pd_256
- (bitconvert (loadv2i64 addr:$src))))]>, VEX, VEX_L,
- Sched<[WriteCvtI2FLd]>;
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ []>, VEX, VEX_L, Sched<[WriteCvtI2FLd]>;
def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L,
- Sched<[WriteCvtI2F]>;
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ []>, VEX, VEX_L, Sched<[WriteCvtI2F]>;
}
let hasSideEffects = 0, mayLoad = 1 in
@@ -2229,8 +2171,7 @@ def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}", [],
IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2FLd]>;
def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
+ "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2F]>;
// AVX register conversion intrinsics
@@ -2239,6 +2180,8 @@ let Predicates = [HasAVX] in {
(VCVTDQ2PDrr VR128:$src)>;
def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
(VCVTDQ2PDrm addr:$src)>;
+ def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
+ (VCVTDQ2PDrm addr:$src)>;
def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
(VCVTDQ2PDYrr VR128:$src)>;
@@ -2252,6 +2195,8 @@ let Predicates = [HasSSE2] in {
(CVTDQ2PDrr VR128:$src)>;
def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
(CVTDQ2PDrm addr:$src)>;
+ def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
+ (CVTDQ2PDrm addr:$src)>;
} // Predicates = [HasSSE2]
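
The scalar_to_vector patterns added above let a 64-bit load feeding X86cvtdq2pd fold into the memory form. A sketch of the shape that now folds, assuming SSE2 intrinsics:

  #include <immintrin.h>

  // The 8-byte load should fold into cvtdq2pd (%rdi), %xmm0 rather
  // than emitting a separate movq first.
  __m128d load_cvt2(const long long *p) {
    __m128i lo = _mm_loadl_epi64((const __m128i *)p);
    return _mm_cvtepi32_pd(lo);
  }
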
// Convert packed double to packed single
@@ -2553,36 +2498,36 @@ let Constraints = "$src1 = $dst" in {
}
let Predicates = [HasAVX] in {
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
(VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
+def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
(VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
(VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
+def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
(VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
-def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
+def : Pat<(v8f32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
(VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
-def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
+def : Pat<(v8f32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
(VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
-def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
+def : Pat<(v4f64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
(VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
-def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
+def : Pat<(v4f64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
(VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
}
let Predicates = [UseSSE1] in {
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
(CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
+def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
(CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
}
let Predicates = [UseSSE2] in {
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
(CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
+def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
(CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
}
@@ -2763,58 +2708,30 @@ let Predicates = [HasAVX1Only] in {
//===----------------------------------------------------------------------===//
/// sse12_extr_sign_mask - sse 1 & 2 packed FP sign mask extraction
-multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
- Domain d> {
+multiclass sse12_extr_sign_mask<RegisterClass RC, ValueType vt,
+ string asm, Domain d> {
def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
+ [(set GR32orGR64:$dst, (X86movmsk (vt RC:$src)))], IIC_SSE_MOVMSK, d>,
Sched<[WriteVecLogic]>;
}
let Predicates = [HasAVX] in {
- defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
- "movmskps", SSEPackedSingle>, PS, VEX;
- defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
- "movmskpd", SSEPackedDouble>, PD, VEX;
- defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
- "movmskps", SSEPackedSingle>, PS,
- VEX, VEX_L;
- defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
- "movmskpd", SSEPackedDouble>, PD,
- VEX, VEX_L;
-
- def : Pat<(i32 (X86fgetsign FR32:$src)),
- (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(i64 (X86fgetsign FR32:$src)),
- (SUBREG_TO_REG (i64 0),
- (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
- def : Pat<(i32 (X86fgetsign FR64:$src)),
- (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(i64 (X86fgetsign FR64:$src)),
- (SUBREG_TO_REG (i64 0),
- (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
+ defm VMOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
+ SSEPackedSingle>, PS, VEX;
+ defm VMOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
+ SSEPackedDouble>, PD, VEX;
+ defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, v8f32, "movmskps",
+ SSEPackedSingle>, PS, VEX, VEX_L;
+ defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, v4f64, "movmskpd",
+ SSEPackedDouble>, PD, VEX, VEX_L;
}
-defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
+defm MOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
SSEPackedSingle>, PS;
-defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
+defm MOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
SSEPackedDouble>, PD;
-def : Pat<(i32 (X86fgetsign FR32:$src)),
- (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
- Requires<[UseSSE1]>;
-def : Pat<(i64 (X86fgetsign FR32:$src)),
- (SUBREG_TO_REG (i64 0),
- (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
- Requires<[UseSSE1]>;
-def : Pat<(i32 (X86fgetsign FR64:$src)),
- (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
- Requires<[UseSSE2]>;
-def : Pat<(i64 (X86fgetsign FR64:$src)),
- (SUBREG_TO_REG (i64 0),
- (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
- Requires<[UseSSE2]>;
-
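
sse12_extr_sign_mask now matches the X86movmsk node on plain vector types instead of one intrinsic per width, and the X86fgetsign patterns are gone, presumably combined into X86movmsk before selection. All the movemask intrinsics route through the same node:

  #include <immintrin.h>

  int signs_ps(__m128 v)    { return _mm_movemask_ps(v); }
  int signs_pd(__m128d v)   { return _mm_movemask_pd(v); }
  int signs_ps256(__m256 v) { return _mm256_movemask_ps(v); }
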
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//
@@ -3695,16 +3612,14 @@ def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
PS, Requires<[HasSSE2]>;
} // SchedRW = [WriteStore]
-let Predicates = [HasAVX2, NoVLX] in {
+let Predicates = [HasAVX, NoVLX] in {
def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
(VMOVNTDQYmr addr:$dst, VR256:$src)>;
def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
(VMOVNTDQYmr addr:$dst, VR256:$src)>;
def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
(VMOVNTDQYmr addr:$dst, VR256:$src)>;
-}
-let Predicates = [HasAVX, NoVLX] in {
def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
(VMOVNTDQmr addr:$dst, VR128:$src)>;
def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
@@ -3713,12 +3628,14 @@ let Predicates = [HasAVX, NoVLX] in {
(VMOVNTDQmr addr:$dst, VR128:$src)>;
}
-def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
- (MOVNTDQmr addr:$dst, VR128:$src)>;
-def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
- (MOVNTDQmr addr:$dst, VR128:$src)>;
-def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
- (MOVNTDQmr addr:$dst, VR128:$src)>;
+let Predicates = [UseSSE2] in {
+ def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
+ (MOVNTDQmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
+ (MOVNTDQmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
+ (MOVNTDQmr addr:$dst, VR128:$src)>;
+}
} // AddedComplexity
@@ -3760,6 +3677,8 @@ def PAUSE : I<0x90, RawFrm, (outs), (ins),
let SchedRW = [WriteFence] in {
// Load, store, and memory fence
+// TODO: As with mfence, we may want to ease the availability of sfence/lfence
+// to include any 64-bit target.
def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
"sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
PS, Requires<[HasSSE1]>;
@@ -3768,11 +3687,9 @@ def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
"mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
- TB, Requires<[HasSSE2]>;
+ TB, Requires<[HasMFence]>;
} // SchedRW
-def : Pat<(X86SFence), (SFENCE)>;
-def : Pat<(X86LFence), (LFENCE)>;
def : Pat<(X86MFence), (MFENCE)>;
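
MFENCE's predicate loosens from HasSSE2 to HasMFence, so the fence can be exposed on targets without the full SSE2 feature bit; the TODO above suggests the same treatment for sfence/lfence. The C-level entry point is unchanged:

  #include <emmintrin.h>

  // Full load/store fence, still reached via int_x86_sse2_mfence.
  void full_fence(void) {
    _mm_mfence();
  }
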
//===----------------------------------------------------------------------===//
@@ -3920,15 +3837,16 @@ def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
} // ExeDomain = SSEPackedInt
-let Predicates = [HasAVX] in {
- def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
- (VMOVDQUmr addr:$dst, VR128:$src)>;
- def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
- (VMOVDQUYmr addr:$dst, VR256:$src)>;
-}
-let Predicates = [UseSSE2] in
-def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
- (MOVDQUmr addr:$dst, VR128:$src)>;
+// Aliases to help the assembler pick two-byte VEX encodings by swapping the
+// operands relative to the normal instructions to use VEX.R instead of VEX.B.
+def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
+ (VMOVDQArr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
+ (VMOVDQAYrr_REV VR256L:$dst, VR256H:$src), 0>;
+def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
+ (VMOVDQUrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
+ (VMOVDQUYrr_REV VR256L:$dst, VR256H:$src), 0>;
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Arithmetic Instructions
@@ -3985,7 +3903,7 @@ let Predicates = [HasAVX2] in
multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
string OpcodeStr, SDNode OpNode,
SDNode OpNode2, RegisterClass RC,
- ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
+ ValueType DstVT, ValueType SrcVT,
PatFrag ld_frag, ShiftOpndItins itins,
bit Is2Addr = 1> {
// src2 is always 128-bit
@@ -4002,7 +3920,7 @@ multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (DstVT (OpNode RC:$src1,
- (bc_frag (ld_frag addr:$src2)))))], itins.rm>,
+ (SrcVT (bitconvert (ld_frag addr:$src2))))))], itins.rm>,
Sched<[WriteVecShiftLd, ReadAfterLd]>;
def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
(ins RC:$src1, u8imm:$src2),
@@ -4046,6 +3964,14 @@ defm PADDD : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
SSE_INTALU_ITINS_P, 1, NoVLX>;
defm PADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
SSE_INTALUQ_ITINS_P, 1, NoVLX>;
+defm PADDSB : PDI_binop_all<0xEC, "paddsb", X86adds, v16i8, v32i8,
+ SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
+defm PADDSW : PDI_binop_all<0xED, "paddsw", X86adds, v8i16, v16i16,
+ SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
+defm PADDUSB : PDI_binop_all<0xDC, "paddusb", X86addus, v16i8, v32i8,
+ SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
+defm PADDUSW : PDI_binop_all<0xDD, "paddusw", X86addus, v8i16, v16i16,
+ SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
@@ -4060,6 +3986,10 @@ defm PSUBD : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
SSE_INTALU_ITINS_P, 0, NoVLX>;
defm PSUBQ : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
SSE_INTALUQ_ITINS_P, 0, NoVLX>;
+defm PSUBSB : PDI_binop_all<0xE8, "psubsb", X86subs, v16i8, v32i8,
+ SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
+defm PSUBSW : PDI_binop_all<0xE9, "psubsw", X86subs, v8i16, v16i16,
+ SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
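
Saturating add/subtract now select through the X86adds/X86addus/X86subs nodes rather than the PDI_binop_all_int intrinsic forms removed just below. The usual intrinsics keep working:

  #include <emmintrin.h>

  __m128i sat_add_i8(__m128i a, __m128i b)  { return _mm_adds_epi8(a, b); }
  __m128i sat_add_u16(__m128i a, __m128i b) { return _mm_adds_epu16(a, b); }
  __m128i sat_sub_i16(__m128i a, __m128i b) { return _mm_subs_epi16(a, b); }
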
@@ -4078,26 +4008,14 @@ defm PAVGW : PDI_binop_all<0xE3, "pavgw", X86avg, v8i16, v16i16,
SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
// Intrinsic forms
-defm PSUBSB : PDI_binop_all_int<0xE8, "psubsb", int_x86_sse2_psubs_b,
- int_x86_avx2_psubs_b, SSE_INTALU_ITINS_P, 0>;
-defm PSUBSW : PDI_binop_all_int<0xE9, "psubsw" , int_x86_sse2_psubs_w,
- int_x86_avx2_psubs_w, SSE_INTALU_ITINS_P, 0>;
-defm PADDSB : PDI_binop_all_int<0xEC, "paddsb" , int_x86_sse2_padds_b,
- int_x86_avx2_padds_b, SSE_INTALU_ITINS_P, 1>;
-defm PADDSW : PDI_binop_all_int<0xED, "paddsw" , int_x86_sse2_padds_w,
- int_x86_avx2_padds_w, SSE_INTALU_ITINS_P, 1>;
-defm PADDUSB : PDI_binop_all_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
- int_x86_avx2_paddus_b, SSE_INTALU_ITINS_P, 1>;
-defm PADDUSW : PDI_binop_all_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
- int_x86_avx2_paddus_w, SSE_INTALU_ITINS_P, 1>;
defm PMADDWD : PDI_binop_all_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
int_x86_avx2_pmadd_wd, SSE_PMADD, 1>;
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
defm VPSADBW : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v2i64, v16i8, VR128,
loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
VEX_4V;
-let Predicates = [HasAVX2] in
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
defm VPSADBWY : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v4i64, v32i8, VR256,
loadv4i64, i256mem, SSE_INTMUL_ITINS_P, 1, 0>,
VEX_4V, VEX_L;
@@ -4105,11 +4023,11 @@ let Constraints = "$src1 = $dst" in
defm PSADBW : PDI_binop_rm2<0xF6, "psadbw", X86psadbw, v2i64, v16i8, VR128,
memopv2i64, i128mem, SSE_INTALU_ITINS_P, 1>;
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoVLX] in
defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
VEX_4V;
-let Predicates = [HasAVX2] in
+let Predicates = [HasAVX2, NoVLX] in
defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
VR256, loadv4i64, i256mem,
SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
@@ -4123,33 +4041,33 @@ defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
let Predicates = [HasAVX, NoVLX] in {
defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
- VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
+ VR128, v4i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
- VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
+ VR128, v2i64, v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
- VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
+ VR128, v4i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
- VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
+ VR128, v2i64, v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
- VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
+ VR128, v4i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
} // Predicates = [HasAVX, NoVLX]
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
- VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+ VR128, v8i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
- VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+ VR128, v8i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
- VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+ VR128, v8i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
} // Predicates = [HasAVX, NoVLX_Or_NoBWI]
@@ -4161,46 +4079,46 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] ,
(outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))]>,
+ (v16i8 (X86vshldq VR128:$src1, (i8 imm:$src2))))]>,
VEX_4V;
def VPSRLDQri : PDIi8<0x73, MRM3r,
(outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))]>,
+ (v16i8 (X86vshrdq VR128:$src1, (i8 imm:$src2))))]>,
VEX_4V;
// PSRADQri doesn't exist in SSE[1-3].
} // Predicates = [HasAVX, NoVLX_Or_NoBWI]
let Predicates = [HasAVX2, NoVLX] in {
defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
- VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
+ VR256, v8i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
- VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
+ VR256, v4i64, v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
- VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
+ VR256, v8i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
- VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
+ VR256, v4i64, v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
- VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
+ VR256, v8i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
}// Predicates = [HasAVX2, NoVLX]
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
- VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+ VR256, v16i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
- VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+ VR256, v16i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
- VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+ VR256, v16i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
}// Predicates = [HasAVX2, NoVLX_Or_NoBWI]
@@ -4211,43 +4129,43 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 ,
(outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
"vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR256:$dst,
- (v4i64 (X86vshldq VR256:$src1, (i8 imm:$src2))))]>,
+ (v32i8 (X86vshldq VR256:$src1, (i8 imm:$src2))))]>,
VEX_4V, VEX_L;
def VPSRLDQYri : PDIi8<0x73, MRM3r,
(outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
"vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR256:$dst,
- (v4i64 (X86vshrdq VR256:$src1, (i8 imm:$src2))))]>,
+ (v32i8 (X86vshrdq VR256:$src1, (i8 imm:$src2))))]>,
VEX_4V, VEX_L;
// PSRADQYri doesn't exist in SSE[1-3].
} // Predicates = [HasAVX2, NoVLX_Or_NoBWI]
let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
- VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
+ VR128, v8i16, v8i16, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
- VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
+ VR128, v4i32, v4i32, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
- VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
+ VR128, v2i64, v2i64, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
- VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
+ VR128, v8i16, v8i16, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
- VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
+ VR128, v4i32, v4i32, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
- VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
+ VR128, v2i64, v2i64, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
- VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
+ VR128, v8i16, v8i16, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
- VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
+ VR128, v4i32, v4i32, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
@@ -4256,13 +4174,13 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
(outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"pslldq\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))],
+ (v16i8 (X86vshldq VR128:$src1, (i8 imm:$src2))))],
IIC_SSE_INTSHDQ_P_RI>;
def PSRLDQri : PDIi8<0x73, MRM3r,
(outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"psrldq\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))],
+ (v16i8 (X86vshrdq VR128:$src1, (i8 imm:$src2))))],
IIC_SSE_INTSHDQ_P_RI>;
// PSRADQri doesn't exist in SSE[1-3].
}
@@ -4273,17 +4191,17 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
//===---------------------------------------------------------------------===//
defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
- SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
+ SSE_INTALU_ITINS_P, 1, TruePredicate>;
defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
- SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
+ SSE_INTALU_ITINS_P, 1, TruePredicate>;
defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
- SSE_INTALU_ITINS_P, 1, NoVLX>;
+ SSE_INTALU_ITINS_P, 1, TruePredicate>;
defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
- SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
+ SSE_INTALU_ITINS_P, 0, TruePredicate>;
defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
- SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
+ SSE_INTALU_ITINS_P, 0, TruePredicate>;
defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
- SSE_INTALU_ITINS_P, 0, NoVLX>;
+ SSE_INTALU_ITINS_P, 0, TruePredicate>;
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Shuffle Instructions
@@ -4291,8 +4209,8 @@ defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
- SDNode OpNode> {
-let Predicates = [HasAVX] in {
+ SDNode OpNode, Predicate prd> {
+let Predicates = [HasAVX, prd] in {
def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
@@ -4310,7 +4228,7 @@ let Predicates = [HasAVX] in {
Sched<[WriteShuffleLd]>;
}
-let Predicates = [HasAVX2] in {
+let Predicates = [HasAVX2, prd] in {
def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
@@ -4348,9 +4266,11 @@ let Predicates = [UseSSE2] in {
}
} // ExeDomain = SSEPackedInt
-defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd>, PD;
-defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw>, XS;
-defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw>, XD;
+defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd, NoVLX>, PD;
+defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw,
+ NoVLX_Or_NoBWI>, XS;
+defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw,
+ NoVLX_Or_NoBWI>, XD;
let Predicates = [HasAVX] in {
def : Pat<(v4f32 (X86PShufd (loadv4f32 addr:$src1), (i8 imm:$imm))),
@@ -4372,8 +4292,8 @@ let Predicates = [UseSSE2] in {
let ExeDomain = SSEPackedInt in {
multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
- ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
- PatFrag ld_frag, bit Is2Addr = 1> {
+ ValueType ArgVT, SDNode OpNode, PatFrag ld_frag,
+ bit Is2Addr = 1> {
def rr : PDI<opc, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -4390,13 +4310,13 @@ multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
- (OutVT (OpNode VR128:$src1,
- (bc_frag (ld_frag addr:$src2)))))]>,
+ (OutVT (OpNode (ArgVT VR128:$src1),
+ (bitconvert (ld_frag addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
- ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
+ ValueType ArgVT, SDNode OpNode> {
def Yrr : PDI<opc, MRMSrcReg,
(outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr,
@@ -4409,14 +4329,14 @@ multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
- (OutVT (OpNode VR256:$src1,
- (bc_frag (loadv4i64 addr:$src2)))))]>,
+ (OutVT (OpNode (ArgVT VR256:$src1),
+ (bitconvert (loadv4i64 addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
- ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
- PatFrag ld_frag, bit Is2Addr = 1> {
+ ValueType ArgVT, SDNode OpNode, PatFrag ld_frag,
+ bit Is2Addr = 1> {
def rr : SS48I<opc, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -4433,13 +4353,13 @@ multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
- (OutVT (OpNode VR128:$src1,
- (bc_frag (ld_frag addr:$src2)))))]>,
+ (OutVT (OpNode (ArgVT VR128:$src1),
+ (bitconvert (ld_frag addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
- ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
+ ValueType ArgVT, SDNode OpNode> {
def Yrr : SS48I<opc, MRMSrcReg,
(outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr,
@@ -4452,47 +4372,46 @@ multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
- (OutVT (OpNode VR256:$src1,
- (bc_frag (loadv4i64 addr:$src2)))))]>,
+ (OutVT (OpNode (ArgVT VR256:$src1),
+ (bitconvert (loadv4i64 addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
- bc_v8i16, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
- bc_v4i32, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
- bc_v8i16, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
- bc_v4i32, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
}
-let Predicates = [HasAVX2] in {
- defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss,
- bc_v16i16>, VEX_4V, VEX_L;
- defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss,
- bc_v8i32>, VEX_4V, VEX_L;
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+ defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss>,
+ VEX_4V, VEX_L;
+ defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss>,
+ VEX_4V, VEX_L;
- defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus,
- bc_v16i16>, VEX_4V, VEX_L;
- defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus,
- bc_v8i32>, VEX_4V, VEX_L;
+ defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus>,
+ VEX_4V, VEX_L;
+ defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus>,
+ VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
- bc_v8i16, memopv2i64>;
+ memopv2i64>;
defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
- bc_v4i32, memopv2i64>;
+ memopv2i64>;
defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
- bc_v8i16, memopv2i64>;
+ memopv2i64>;
- let Predicates = [HasSSE41] in
defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
- bc_v4i32, memopv2i64>;
+ memopv2i64>;
}
} // ExeDomain = SSEPackedInt
@@ -4502,8 +4421,7 @@ let Constraints = "$src1 = $dst" in {
let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
- SDNode OpNode, PatFrag bc_frag, PatFrag ld_frag,
- bit Is2Addr = 1> {
+ SDNode OpNode, PatFrag ld_frag, bit Is2Addr = 1> {
def rr : PDI<opc, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -4516,14 +4434,14 @@ multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
!if(Is2Addr,
!strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (OpNode VR128:$src1,
- (bc_frag (ld_frag addr:$src2))))],
+ [(set VR128:$dst, (vt (OpNode VR128:$src1,
+ (bitconvert (ld_frag addr:$src2)))))],
IIC_SSE_UNPCK>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
- SDNode OpNode, PatFrag bc_frag> {
+ SDNode OpNode> {
def Yrr : PDI<opc, MRMSrcReg,
(outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -4532,72 +4450,72 @@ multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
def Yrm : PDI<opc, MRMSrcMem,
(outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst, (OpNode VR256:$src1,
- (bc_frag (loadv4i64 addr:$src2))))]>,
+ [(set VR256:$dst, (vt (OpNode VR256:$src1,
+ (bitconvert (loadv4i64 addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
- bc_v16i8, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
- bc_v8i16, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
- bc_v16i8, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
- bc_v8i16, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
}
let Predicates = [HasAVX, NoVLX] in {
defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
- bc_v4i32, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
- bc_v2i64, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
- bc_v4i32, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
- bc_v2i64, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
- defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
- bc_v32i8>, VEX_4V, VEX_L;
- defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
- bc_v16i16>, VEX_4V, VEX_L;
- defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
- bc_v32i8>, VEX_4V, VEX_L;
- defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
- bc_v16i16>, VEX_4V, VEX_L;
+ defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh>,
+ VEX_4V, VEX_L;
}
let Predicates = [HasAVX2, NoVLX] in {
- defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
- bc_v8i32>, VEX_4V, VEX_L;
- defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
- bc_v4i64>, VEX_4V, VEX_L;
- defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
- bc_v8i32>, VEX_4V, VEX_L;
- defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
- bc_v4i64>, VEX_4V, VEX_L;
+ defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh>,
+ VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
- bc_v16i8, memopv2i64>;
+ memopv2i64>;
defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
- bc_v8i16, memopv2i64>;
+ memopv2i64>;
defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
- bc_v4i32, memopv2i64>;
+ memopv2i64>;
defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
- bc_v2i64, memopv2i64>;
+ memopv2i64>;
defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
- bc_v16i8, memopv2i64>;
+ memopv2i64>;
defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
- bc_v8i16, memopv2i64>;
+ memopv2i64>;
defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
- bc_v4i32, memopv2i64>;
+ memopv2i64>;
defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
- bc_v2i64, memopv2i64>;
+ memopv2i64>;
}
} // ExeDomain = SSEPackedInt
@@ -4661,20 +4579,20 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
+ [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))],
IIC_SSE_MOVMSK>, VEX;
let Predicates = [HasAVX2] in {
def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR256:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32orGR64:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>,
+ [(set GR32orGR64:$dst, (X86movmsk (v32i8 VR256:$src)))]>,
VEX, VEX_L;
}
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
+ [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))],
IIC_SSE_MOVMSK>;
} // ExeDomain = SSEPackedInt
@@ -5022,16 +4940,14 @@ def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
}
+// Aliases to help the assembler pick two byte VEX encodings by swapping the
+// operands relative to the normal instructions to use VEX.R instead of VEX.B.
+def : InstAlias<"vmovq\t{$src, $dst|$dst, $src}",
+ (VMOVPQI2QIrr VR128L:$dst, VR128H:$src), 0>;
+
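// Editor's illustrative note, not part of the original patch: the two-byte C5
// VEX prefix only carries VEX.R (plus vvvv, L and pp), while VEX.B, VEX.X and
// VEX.W require the three-byte C4 prefix. With the alias above, an instruction
// such as "vmovq %xmm8, %xmm0" (AT&T order: source %xmm8, destination %xmm0)
// is assumed to be emitted through the MRMDestReg form, so the high register
// lands in ModRM.reg where VEX.R can extend it, and the encoding stays one
// byte shorter than the form that would need VEX.B.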
//===---------------------------------------------------------------------===//
// Store / copy lower 64-bits of a XMM register.
//
-let Predicates = [HasAVX] in
-def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
- (VMOVPQI2QImr addr:$dst, VR128:$src)>;
-let Predicates = [UseSSE2] in
-def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
- (MOVPQI2QImr addr:$dst, VR128:$src)>;
-
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, AddedComplexity = 20 in {
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
@@ -5058,6 +4974,8 @@ let Predicates = [UseAVX], AddedComplexity = 20 in {
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
(v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrm addr:$src), sub_xmm)>;
+ def : Pat<(v4i64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrm addr:$src), sub_xmm)>;
}
let Predicates = [UseSSE2], AddedComplexity = 20 in {
@@ -5066,13 +4984,6 @@ let Predicates = [UseSSE2], AddedComplexity = 20 in {
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}
-let Predicates = [HasAVX] in {
-def : Pat<(v4i64 (alignedX86vzload addr:$src)),
- (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
-def : Pat<(v4i64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
-}
-
//===---------------------------------------------------------------------===//
// Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
// IA32 document. movq xmm1, xmm2 does clear the high bits.
@@ -5442,38 +5353,36 @@ let Constraints = "$src1 = $dst" in {
/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
-multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
- PatFrag ld_frag> {
+multiclass SS3I_unop_rm<bits<8> opc, string OpcodeStr, ValueType vt,
+ SDNode OpNode, PatFrag ld_frag> {
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
- Sched<[WriteVecALU]>;
+ [(set VR128:$dst, (vt (OpNode VR128:$src)))],
+ IIC_SSE_PABS_RR>, Sched<[WriteVecALU]>;
def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
- (IntId128
- (bitconvert (ld_frag addr:$src))))], IIC_SSE_PABS_RM>,
- Sched<[WriteVecALULd]>;
+ (vt (OpNode (bitconvert (ld_frag addr:$src)))))],
+ IIC_SSE_PABS_RM>, Sched<[WriteVecALULd]>;
}
/// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
-multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId256> {
+multiclass SS3I_unop_rm_y<bits<8> opc, string OpcodeStr, ValueType vt,
+ SDNode OpNode> {
def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId256 VR256:$src))]>,
+ [(set VR256:$dst, (vt (OpNode VR256:$src)))]>,
Sched<[WriteVecALU]>;
def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
(ins i256mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst,
- (IntId256
- (bitconvert (loadv4i64 addr:$src))))]>,
+ (vt (OpNode (bitconvert (loadv4i64 addr:$src)))))]>,
Sched<[WriteVecALULd]>;
}
@@ -5487,14 +5396,15 @@ def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
-let Predicates = [HasAVX] in {
- defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", int_x86_ssse3_pabs_b_128,
- loadv2i64>, VEX;
- defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", int_x86_ssse3_pabs_w_128,
- loadv2i64>, VEX;
- defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", int_x86_ssse3_pabs_d_128,
- loadv2i64>, VEX;
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
+ defm VPABSB : SS3I_unop_rm<0x1C, "vpabsb", v16i8, X86Abs, loadv2i64>, VEX;
+ defm VPABSW : SS3I_unop_rm<0x1D, "vpabsw", v8i16, X86Abs, loadv2i64>, VEX;
+}
+let Predicates = [HasAVX, NoVLX] in {
+ defm VPABSD : SS3I_unop_rm<0x1E, "vpabsd", v4i32, X86Abs, loadv2i64>, VEX;
+}
+let Predicates = [HasAVX] in {
def : Pat<(xor
(bc_v2i64 (v16i1sextv16i8)),
(bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
@@ -5509,14 +5419,15 @@ let Predicates = [HasAVX] in {
(VPABSDrr128 VR128:$src)>;
}
-let Predicates = [HasAVX2] in {
- defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
- int_x86_avx2_pabs_b>, VEX, VEX_L;
- defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
- int_x86_avx2_pabs_w>, VEX, VEX_L;
- defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
- int_x86_avx2_pabs_d>, VEX, VEX_L;
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+ defm VPABSB : SS3I_unop_rm_y<0x1C, "vpabsb", v32i8, X86Abs>, VEX, VEX_L;
+ defm VPABSW : SS3I_unop_rm_y<0x1D, "vpabsw", v16i16, X86Abs>, VEX, VEX_L;
+}
+let Predicates = [HasAVX2, NoVLX] in {
+ defm VPABSD : SS3I_unop_rm_y<0x1E, "vpabsd", v8i32, X86Abs>, VEX, VEX_L;
+}
+let Predicates = [HasAVX2] in {
def : Pat<(xor
(bc_v4i64 (v32i1sextv32i8)),
(bc_v4i64 (add (v32i8 VR256:$src), (v32i1sextv32i8)))),
@@ -5531,14 +5442,11 @@ let Predicates = [HasAVX2] in {
(VPABSDrr256 VR256:$src)>;
}
-defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", int_x86_ssse3_pabs_b_128,
- memopv2i64>;
-defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", int_x86_ssse3_pabs_w_128,
- memopv2i64>;
-defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", int_x86_ssse3_pabs_d_128,
- memopv2i64>;
+defm PABSB : SS3I_unop_rm<0x1C, "pabsb", v16i8, X86Abs, memopv2i64>;
+defm PABSW : SS3I_unop_rm<0x1D, "pabsw", v8i16, X86Abs, memopv2i64>;
+defm PABSD : SS3I_unop_rm<0x1E, "pabsd", v4i32, X86Abs, memopv2i64>;
-let Predicates = [HasSSSE3] in {
+let Predicates = [UseSSSE3] in {
def : Pat<(xor
(bc_v2i64 (v16i1sextv16i8)),
(bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
@@ -5659,15 +5567,15 @@ let isCommutable = 0 in {
defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
loadv2i64, i128mem,
SSE_PHADDSUBD, 0>, VEX_4V;
- defm VPSIGNB : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
- loadv2i64, i128mem,
- SSE_PSIGN, 0>, VEX_4V;
- defm VPSIGNW : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
- loadv2i64, i128mem,
- SSE_PSIGN, 0>, VEX_4V;
- defm VPSIGND : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
- loadv2i64, i128mem,
- SSE_PSIGN, 0>, VEX_4V;
+ defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb",
+ int_x86_ssse3_psign_b_128,
+ SSE_PSIGN, loadv2i64, 0>, VEX_4V;
+ defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw",
+ int_x86_ssse3_psign_w_128,
+ SSE_PSIGN, loadv2i64, 0>, VEX_4V;
+ defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd",
+ int_x86_ssse3_psign_d_128,
+ SSE_PSIGN, loadv2i64, 0>, VEX_4V;
defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
loadv2i64, i128mem,
SSE_PSHUFB, 0>, VEX_4V;
@@ -5700,15 +5608,12 @@ let isCommutable = 0 in {
defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
- defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
- loadv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
- defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
- loadv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
- defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
- loadv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
+ defm VPSIGNBY : SS3I_binop_rm_int_y<0x08, "vpsignb", int_x86_avx2_psign_b,
+ WriteVecALU>, VEX_4V, VEX_L;
+ defm VPSIGNWY : SS3I_binop_rm_int_y<0x09, "vpsignw", int_x86_avx2_psign_w,
+ WriteVecALU>, VEX_4V, VEX_L;
+ defm VPSIGNDY : SS3I_binop_rm_int_y<0x0A, "vpsignd", int_x86_avx2_psign_d,
+ WriteVecALU>, VEX_4V, VEX_L;
defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
loadv4i64, i256mem,
SSE_PSHUFB, 0>, VEX_4V, VEX_L;
@@ -5738,12 +5643,12 @@ let isCommutable = 0 in {
memopv2i64, i128mem, SSE_PHADDSUBW>;
defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
memopv2i64, i128mem, SSE_PHADDSUBD>;
- defm PSIGNB : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
- memopv2i64, i128mem, SSE_PSIGN>;
- defm PSIGNW : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
- memopv2i64, i128mem, SSE_PSIGN>;
- defm PSIGND : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
- memopv2i64, i128mem, SSE_PSIGN>;
+ defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", int_x86_ssse3_psign_b_128,
+ SSE_PSIGN, memopv2i64>;
+ defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", int_x86_ssse3_psign_w_128,
+ SSE_PSIGN, memopv2i64>;
+ defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", int_x86_ssse3_psign_d_128,
+ SSE_PSIGN, memopv2i64>;
defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
memopv2i64, i128mem, SSE_PSHUFB>;
defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
@@ -5767,7 +5672,7 @@ defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
let hasSideEffects = 0 in {
- def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
+ def rri : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5775,7 +5680,7 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
let mayLoad = 1 in
- def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
+ def rmi : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5787,13 +5692,13 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
let hasSideEffects = 0 in {
- def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
+ def Yrri : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, u8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, Sched<[WriteShuffle]>;
let mayLoad = 1 in
- def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
+ def Yrmi : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2, u8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
@@ -5802,43 +5707,43 @@ multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
}
let Predicates = [HasAVX] in
- defm VPALIGN : ssse3_palignr<"vpalignr", 0>, VEX_4V;
+ defm VPALIGNR : ssse3_palignr<"vpalignr", 0>, VEX_4V;
let Predicates = [HasAVX2] in
- defm VPALIGN : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
+ defm VPALIGNR : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
- defm PALIGN : ssse3_palignr<"palignr">;
+ defm PALIGNR : ssse3_palignr<"palignr">;
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
def : Pat<(v8i32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
+ (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8f32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
+ (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
+ (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
+ (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
}
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
}
let Predicates = [UseSSSE3] in {
def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
}
//===---------------------------------------------------------------------===//
@@ -5855,6 +5760,7 @@ def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
let Uses = [EAX, ECX, EDX] in
def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
TB, Requires<[HasSSE3]>;
+
let Uses = [ECX, EAX] in
def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
[(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
@@ -5890,45 +5796,48 @@ multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
X86MemOperand MemOp, X86MemOperand MemYOp,
OpndItins SSEItins, OpndItins AVXItins,
- OpndItins AVX2Itins> {
+ OpndItins AVX2Itins, Predicate prd> {
defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128, SSEItins>;
- let Predicates = [HasAVX, NoVLX] in
+ let Predicates = [HasAVX, prd] in
defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
VR128, VR128, AVXItins>, VEX;
- let Predicates = [HasAVX2, NoVLX] in
+ let Predicates = [HasAVX2, prd] in
defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
VR256, VR128, AVX2Itins>, VEX, VEX_L;
}
-multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr,
- X86MemOperand MemOp, X86MemOperand MemYOp> {
+multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
+ X86MemOperand MemYOp, Predicate prd> {
defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
MemOp, MemYOp,
SSE_INTALU_ITINS_SHUFF_P,
DEFAULT_ITINS_SHUFFLESCHED,
- DEFAULT_ITINS_SHUFFLESCHED>;
+ DEFAULT_ITINS_SHUFFLESCHED, prd>;
defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
!strconcat("pmovzx", OpcodeStr),
MemOp, MemYOp,
SSE_INTALU_ITINS_SHUFF_P,
DEFAULT_ITINS_SHUFFLESCHED,
- DEFAULT_ITINS_SHUFFLESCHED>;
+ DEFAULT_ITINS_SHUFFLESCHED, prd>;
}
-defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem>;
-defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem>;
-defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem>;
+defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem, NoVLX_Or_NoBWI>;
+defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem, NoVLX>;
+defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem, NoVLX>;
-defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem>;
-defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem>;
+defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem, NoVLX>;
+defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem, NoVLX>;
-defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem>;
+defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem, NoVLX>;
// AVX2 Patterns
multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtOp> {
// Register-Register patterns
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
(!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v8i32 (ExtOp (v16i8 VR128:$src))),
(!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
def : Pat<(v4i64 (ExtOp (v16i8 VR128:$src))),
@@ -5941,26 +5850,14 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtO
def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
(!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
-
- // On AVX2, we also support 256bit inputs.
- def : Pat<(v16i16 (ExtOp (v32i8 VR256:$src))),
- (!cast<I>(OpcPrefix#BWYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v8i32 (ExtOp (v32i8 VR256:$src))),
- (!cast<I>(OpcPrefix#BDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v4i64 (ExtOp (v32i8 VR256:$src))),
- (!cast<I>(OpcPrefix#BQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-
- def : Pat<(v8i32 (ExtOp (v16i16 VR256:$src))),
- (!cast<I>(OpcPrefix#WDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v4i64 (ExtOp (v16i16 VR256:$src))),
- (!cast<I>(OpcPrefix#WQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-
- def : Pat<(v4i64 (ExtOp (v8i32 VR256:$src))),
- (!cast<I>(OpcPrefix#DQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+ }
// Simple Register-Memory patterns
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
(!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
(!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
@@ -5973,8 +5870,10 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtO
def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
(!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+ }
// AVX2 Register-Memory patterns
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
@@ -5983,7 +5882,8 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtO
(!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
-
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
(!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
@@ -6028,18 +5928,20 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtO
(!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+ }
}
-let Predicates = [HasAVX2, NoVLX] in {
- defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
- defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
-}
+defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
+defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
// SSE4.1/AVX patterns.
multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
SDNode ExtOp, PatFrag ExtLoad16> {
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
(!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
(!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
@@ -6052,9 +5954,12 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
(!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
-
+ }
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
(!cast<I>(OpcPrefix#BWrm) addr:$src)>;
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
(!cast<I>(OpcPrefix#BDrm) addr:$src)>;
def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
@@ -6067,7 +5972,8 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
(!cast<I>(OpcPrefix#DQrm) addr:$src)>;
-
+ }
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
(!cast<I>(OpcPrefix#BWrm) addr:$src)>;
def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
@@ -6078,7 +5984,8 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
(!cast<I>(OpcPrefix#BWrm) addr:$src)>;
def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#BWrm) addr:$src)>;
-
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
(!cast<I>(OpcPrefix#BDrm) addr:$src)>;
def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
@@ -6127,12 +6034,11 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
(!cast<I>(OpcPrefix#DQrm) addr:$src)>;
def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#DQrm) addr:$src)>;
+ }
}
-let Predicates = [HasAVX, NoVLX] in {
- defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
- defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
-}
+defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
+defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
let Predicates = [UseSSE41] in {
defm : SS41I_pmovx_patterns<"PMOVSX", "s", X86vsext, extloadi32i16>;
@@ -6859,63 +6765,67 @@ multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
let Predicates = [HasAVX, NoVLX] in {
- defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
- loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
- VEX_4V;
defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", smin, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", umin, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
+ defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
+ defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
+ defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
+ VR128, loadv2i64, i128mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+}
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
+ defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
+ defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
+ defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v8i16, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
- VR128, loadv2i64, i128mem,
- SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
}
let Predicates = [HasAVX2, NoVLX] in {
- defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
- loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
- VEX_4V, VEX_L;
defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", smin, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", umin, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
+ defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
+ defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
+ defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
+ VR256, loadv4i64, i256mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
+}
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+ defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
+ defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
+ defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v16i16, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
- VR256, loadv4i64, i256mem,
- SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
@@ -7238,14 +7148,12 @@ let Predicates = [UseAVX] in {
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
-let Predicates = [UseSSE41] in {
+let Predicates = [UseSSE41], AddedComplexity = 15 in {
// With SSE41 we can use blends for these patterns.
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
(BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
(PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
- def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
- (BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;
}
@@ -7316,13 +7224,14 @@ let Predicates = [UseSSE41] in {
(BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
}
+let AddedComplexity = 400 in { // Prefer non-temporal versions
let SchedRW = [WriteLoad] in {
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoVLX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
VEX;
-let Predicates = [HasAVX2] in
+let Predicates = [HasAVX2, NoVLX] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
@@ -7332,6 +7241,35 @@ def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
} // SchedRW
+let Predicates = [HasAVX2, NoVLX] in {
+ def : Pat<(v8f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAYrm addr:$src)>;
+ def : Pat<(v4f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAYrm addr:$src)>;
+ def : Pat<(v4i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAYrm addr:$src)>;
+}
+
+let Predicates = [HasAVX, NoVLX] in {
+ def : Pat<(v4f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQArm addr:$src)>;
+ def : Pat<(v2f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQArm addr:$src)>;
+ def : Pat<(v2i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQArm addr:$src)>;
+}
+
+let Predicates = [UseSSE41] in {
+ def : Pat<(v4f32 (alignednontemporalload addr:$src)),
+ (MOVNTDQArm addr:$src)>;
+ def : Pat<(v2f64 (alignednontemporalload addr:$src)),
+ (MOVNTDQArm addr:$src)>;
+ def : Pat<(v2i64 (alignednontemporalload addr:$src)),
+ (MOVNTDQArm addr:$src)>;
+}
+
+} // AddedComplexity
+
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//
@@ -7815,14 +7753,24 @@ def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
VR128:$mask))]>, XD;
}
+// Non-temporal (unaligned) scalar stores.
+let AddedComplexity = 400 in { // Prefer non-temporal versions
+let mayStore = 1, SchedRW = [WriteStore] in {
def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
- "movntss\t{$src, $dst|$dst, $src}",
- [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;
+ "movntss\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVNT>, XS;
def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
- "movntsd\t{$src, $dst|$dst, $src}",
- [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
-}
+ "movntsd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVNT>, XD;
+} // SchedRW
+
+def : Pat<(nontemporalstore FR32:$src, addr:$dst),
+ (MOVNTSS addr:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+
+def : Pat<(nontemporalstore FR64:$src, addr:$dst),
+ (MOVNTSD addr:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+
+} // AddedComplexity
+} // HasSSE4A
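// Editor's illustrative sketch, not part of the original patch: with the
// patterns above, a scalar store tagged as non-temporal in IR, e.g.
//   store float %v, float* %p, align 4, !nontemporal !0   ; !0 = !{i32 1}
// is expected to select MOVNTSS on an SSE4A target (the value is first copied
// into a VR128 register by COPY_TO_REGCLASS), instead of relying on the
// removed int_x86_sse4a_movnt_* intrinsic patterns.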
//===----------------------------------------------------------------------===//
// AVX Instructions
@@ -7848,24 +7796,24 @@ class avx2_broadcast_rr<bits<8> opc, string OpcodeStr, RegisterClass RC,
[(set RC:$dst, (ResVT (X86VBroadcast (OpVT VR128:$src))))]>,
Sched<[Sched]>, VEX;
-let ExeDomain = SSEPackedSingle in {
+let ExeDomain = SSEPackedSingle, Predicates = [HasAVX, NoVLX] in {
def VBROADCASTSSrm : avx_broadcast_rm<0x18, "vbroadcastss", VR128,
f32mem, v4f32, loadf32, WriteLoad>;
def VBROADCASTSSYrm : avx_broadcast_rm<0x18, "vbroadcastss", VR256,
f32mem, v8f32, loadf32,
WriteFShuffleLd>, VEX_L;
}
-let ExeDomain = SSEPackedDouble in
+let ExeDomain = SSEPackedDouble, Predicates = [HasAVX, NoVLX] in
def VBROADCASTSDYrm : avx_broadcast_rm<0x19, "vbroadcastsd", VR256, f64mem,
v4f64, loadf64, WriteFShuffleLd>, VEX_L;
-let ExeDomain = SSEPackedSingle in {
+let ExeDomain = SSEPackedSingle, Predicates = [HasAVX2, NoVLX] in {
def VBROADCASTSSrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR128,
v4f32, v4f32, WriteFShuffle>;
def VBROADCASTSSYrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR256,
v8f32, v4f32, WriteFShuffle256>, VEX_L;
}
-let ExeDomain = SSEPackedDouble in
+let ExeDomain = SSEPackedDouble, Predicates = [HasAVX2, NoVLX] in
def VBROADCASTSDYrr : avx2_broadcast_rr<0x19, "vbroadcastsd", VR256,
v4f64, v2f64, WriteFShuffle256>, VEX_L;
@@ -7977,7 +7925,7 @@ def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
}
// AVX1 patterns
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX, NoVLX] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v4f32 (VEXTRACTF128rr
(v8f32 VR256:$src1),
@@ -8015,20 +7963,20 @@ def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v32i8 VR256:$src1),
(EXTRACT_get_vextract128_imm VR128:$ext)))>;
-def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
(EXTRACT_get_vextract128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
(EXTRACT_get_vextract128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
(EXTRACT_get_vextract128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
(EXTRACT_get_vextract128_imm VR128:$ext))>;
}
@@ -8078,45 +8026,45 @@ defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
RegisterClass RC, X86MemOperand x86memop_f,
X86MemOperand x86memop_i, PatFrag i_frag,
- Intrinsic IntVar, ValueType vt> {
- def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V,
- Sched<[WriteFShuffle]>;
- def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop_i:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (IntVar RC:$src1,
- (bitconvert (i_frag addr:$src2))))]>, VEX_4V,
- Sched<[WriteFShuffleLd, ReadAfterLd]>;
-
+ ValueType f_vt, ValueType i_vt> {
let Predicates = [HasAVX, NoVLX] in {
+ def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1, (i_vt RC:$src2))))]>, VEX_4V,
+ Sched<[WriteFShuffle]>;
+ def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop_i:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1,
+ (i_vt (bitconvert (i_frag addr:$src2))))))]>, VEX_4V,
+ Sched<[WriteFShuffleLd, ReadAfterLd]>;
+
def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
+ [(set RC:$dst, (f_vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
Sched<[WriteFShuffle]>;
def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
(ins x86memop_f:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
- (vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
+ (f_vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
Sched<[WriteFShuffleLd]>;
}// Predicates = [HasAVX, NoVLX]
}
let ExeDomain = SSEPackedSingle in {
defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
- loadv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
+ loadv2i64, v4f32, v4i32>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
- loadv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
+ loadv4i64, v8f32, v8i32>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
- loadv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
+ loadv2i64, v2f64, v2i64>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
- loadv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
+ loadv4i64, v4f64, v4i64>, VEX_L;
}
let Predicates = [HasAVX, NoVLX] in {
@@ -8158,6 +8106,7 @@ def : Pat<(v2i64 (X86VPermilpi (loadv2i64 addr:$src1), (i8 imm:$imm))),
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
let ExeDomain = SSEPackedSingle in {
+let isCommutable = 1 in
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, u8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -8276,9 +8225,14 @@ let Predicates = [HasF16C] in {
// Patterns for matching conversions from float to half-float and vice versa.
let Predicates = [HasF16C] in {
+ // Use MXCSR.RC for rounding instead of explicitly specifying the default
+ // rounding mode (Nearest-Even, encoded as 0). Both are equivalent in the
+ // configurations we support (the default). However, falling back to MXCSR is
+ // more consistent with other instructions, which are always controlled by it.
+ // Using MXCSR.RC is selected by setting bit 2 of the immediate, i.e. 0b100.
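+ // (When bit 2 is clear, imm[1:0] holds the explicit rounding mode, with 00
+ // being Nearest-Even, which is why the patterns below pass 4 rather than 0.)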
def : Pat<(fp_to_f16 FR32:$src),
(i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
- (COPY_TO_REGCLASS FR32:$src, VR128), 0)), sub_16bit))>;
+ (COPY_TO_REGCLASS FR32:$src, VR128), 4)), sub_16bit))>;
def : Pat<(f16_to_fp GR16:$src),
(f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
@@ -8286,7 +8240,7 @@ let Predicates = [HasF16C] in {
def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
(f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
- (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 0)), FR32)) >;
+ (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 4)), FR32)) >;
}
//===----------------------------------------------------------------------===//
@@ -8387,49 +8341,54 @@ let Predicates = [HasAVX2] in {
def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
(VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
sub_xmm)))>;
+}
+let Predicates = [HasAVX2, NoVLX] in {
// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
- let AddedComplexity = 20 in {
+ let AddedComplexity = 20 in {
def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
(VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
(VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
(VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ }
+}
+
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI], AddedComplexity = 20 in {
+ def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
+ (VPBROADCASTBrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
+ VR128))>;
+ def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
+ (VPBROADCASTBYrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
+ VR128))>;
+
+ def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
+ (VPBROADCASTWrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
+ VR128))>;
+ def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
+ (VPBROADCASTWYrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
+ VR128))>;
+}
+let Predicates = [HasAVX2, NoVLX], AddedComplexity = 20 in {
+ def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
+ (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
+ def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
+ (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
+ def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
+ (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
- def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
- (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
- def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
- (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
- def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
- (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
-
- def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
- (VPBROADCASTBrr (COPY_TO_REGCLASS
- (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
- VR128))>;
- def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
- (VPBROADCASTBYrr (COPY_TO_REGCLASS
- (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
- VR128))>;
-
- def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
- (VPBROADCASTWrr (COPY_TO_REGCLASS
- (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
- VR128))>;
- def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
- (VPBROADCASTWYrr (COPY_TO_REGCLASS
- (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
- VR128))>;
-
- // The patterns for VPBROADCASTD are not needed because they would match
- // the exact same thing as VBROADCASTSS patterns.
-
- def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
- (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
- // The v4i64 pattern is not needed because VBROADCASTSDYrr already match.
- }
+ // The patterns for VPBROADCASTD are not needed because they would match
+ // the exact same thing as VBROADCASTSS patterns.
+
+ def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
+ (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
+ // The v4i64 pattern is not needed because VBROADCASTSDYrr already matches.
}
// AVX1 broadcast patterns
@@ -8442,11 +8401,15 @@ def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
(VBROADCASTSSrm addr:$src)>;
}
-let Predicates = [HasAVX] in {
// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
- let AddedComplexity = 20 in {
+let Predicates = [HasAVX], AddedComplexity = 20 in {
// 128bit broadcasts:
+ def : Pat<(v2f64 (X86VBroadcast f64:$src)),
+ (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
+}
+
+let Predicates = [HasAVX, NoVLX], AddedComplexity = 20 in {
def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
(VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
@@ -8468,12 +8431,9 @@ let Predicates = [HasAVX] in {
(VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
(VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
(VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
- }
- def : Pat<(v2f64 (X86VBroadcast f64:$src)),
- (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
def : Pat<(v2i64 (X86VBroadcast i64:$src)),
- (VMOVDDUPrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
+ (VMOVDDUPrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
}
//===----------------------------------------------------------------------===//
@@ -8482,21 +8442,23 @@ let Predicates = [HasAVX] in {
multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
ValueType OpVT, X86FoldableSchedWrite Sched> {
- def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst,
- (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
- Sched<[Sched]>, VEX_4V, VEX_L;
- def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, i256mem:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst,
- (OpVT (X86VPermv VR256:$src1,
- (bitconvert (mem_frag addr:$src2)))))]>,
- Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
+ let Predicates = [HasAVX2, NoVLX] in {
+ def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
+ Sched<[Sched]>, VEX_4V, VEX_L;
+ def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OpVT (X86VPermv VR256:$src1,
+ (bitconvert (mem_frag addr:$src2)))))]>,
+ Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
+ }
}
defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32, WriteShuffle256>;
@@ -8505,21 +8467,23 @@ defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32, WriteFShuffle256>;
multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
ValueType OpVT, X86FoldableSchedWrite Sched> {
- def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, u8imm:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst,
- (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
- Sched<[Sched]>, VEX, VEX_L;
- def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
- (ins i256mem:$src1, u8imm:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst,
- (OpVT (X86VPermi (mem_frag addr:$src1),
- (i8 imm:$src2))))]>,
- Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
+ let Predicates = [HasAVX2, NoVLX] in {
+ def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, u8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
+ Sched<[Sched]>, VEX, VEX_L;
+ def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins i256mem:$src1, u8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OpVT (X86VPermi (mem_frag addr:$src1),
+ (i8 imm:$src2))))]>,
+ Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
+ }
}
defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
@@ -8531,6 +8495,7 @@ defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Floating-Point Values in 128-bit chunks
//
+let isCommutable = 1 in
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, u8imm:$src3),
"vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -8631,7 +8596,7 @@ def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
"vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
Sched<[WriteStore]>, VEX, VEX_L;
-let Predicates = [HasAVX2] in {
+let Predicates = [HasAVX2, NoVLX] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v2i64 (VEXTRACTI128rr
(v4i64 VR256:$src1),
@@ -8703,116 +8668,42 @@ defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
int_x86_avx2_maskstore_q,
int_x86_avx2_maskstore_q_256>, VEX_W;
-def: Pat<(X86mstore addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src)),
- (VMASKMOVPSYmr addr:$ptr, VR256:$mask, VR256:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src)),
- (VPMASKMOVDYmr addr:$ptr, VR256:$mask, VR256:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src)),
- (VMASKMOVPSmr addr:$ptr, VR128:$mask, VR128:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src)),
- (VPMASKMOVDmr addr:$ptr, VR128:$mask, VR128:$src)>;
-
-def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
- (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask),
- (bc_v8f32 (v8i32 immAllZerosV)))),
- (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src0))),
- (VBLENDVPSYrr VR256:$src0, (VMASKMOVPSYrm VR256:$mask, addr:$ptr),
- VR256:$mask)>;
-
-def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
- (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 immAllZerosV))),
- (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src0))),
- (VBLENDVPSYrr VR256:$src0, (VPMASKMOVDYrm VR256:$mask, addr:$ptr),
- VR256:$mask)>;
-
-def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
- (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask),
- (bc_v4f32 (v4i32 immAllZerosV)))),
- (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src0))),
- (VBLENDVPSrr VR128:$src0, (VMASKMOVPSrm VR128:$mask, addr:$ptr),
- VR128:$mask)>;
-
-def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
- (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 immAllZerosV))),
- (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src0))),
- (VBLENDVPSrr VR128:$src0, (VPMASKMOVDrm VR128:$mask, addr:$ptr),
- VR128:$mask)>;
-
-def: Pat<(X86mstore addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src)),
- (VMASKMOVPDYmr addr:$ptr, VR256:$mask, VR256:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src)),
- (VPMASKMOVQYmr addr:$ptr, VR256:$mask, VR256:$src)>;
-
-def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
- (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
- (v4f64 immAllZerosV))),
- (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src0))),
- (VBLENDVPDYrr VR256:$src0, (VMASKMOVPDYrm VR256:$mask, addr:$ptr),
- VR256:$mask)>;
-
-def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
- (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
- (bc_v4i64 (v8i32 immAllZerosV)))),
- (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src0))),
- (VBLENDVPDYrr VR256:$src0, (VPMASKMOVQYrm VR256:$mask, addr:$ptr),
- VR256:$mask)>;
-
-def: Pat<(X86mstore addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src)),
- (VMASKMOVPDmr addr:$ptr, VR128:$mask, VR128:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src)),
- (VPMASKMOVQmr addr:$ptr, VR128:$mask, VR128:$src)>;
-
-def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
- (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
- (v2f64 immAllZerosV))),
- (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src0))),
- (VBLENDVPDrr VR128:$src0, (VMASKMOVPDrm VR128:$mask, addr:$ptr),
- VR128:$mask)>;
-
-def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
- (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
- (bc_v2i64 (v4i32 immAllZerosV)))),
- (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src0))),
- (VBLENDVPDrr VR128:$src0, (VPMASKMOVQrm VR128:$mask, addr:$ptr),
- VR128:$mask)>;
-
+multiclass maskmov_lowering<string InstrStr, RegisterClass RC, ValueType VT,
+ ValueType MaskVT, string BlendStr, ValueType ZeroVT> {
+ // masked store
+ def: Pat<(X86mstore addr:$ptr, (MaskVT RC:$mask), (VT RC:$src)),
+ (!cast<Instruction>(InstrStr#"mr") addr:$ptr, RC:$mask, RC:$src)>;
+ // masked load
+ def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask), undef)),
+ (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
+ def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask),
+ (VT (bitconvert (ZeroVT immAllZerosV))))),
+ (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
+ def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask), (VT RC:$src0))),
+ (!cast<Instruction>(BlendStr#"rr")
+ RC:$src0,
+ (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr),
+ RC:$mask)>;
+}
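+// For example, instantiating maskmov_lowering<"VPMASKMOVD", VR128, v4i32, v4i32,
+// "VBLENDVPS", v4i32> yields a masked-store pattern selecting VPMASKMOVDmr,
+// masked-load patterns selecting VPMASKMOVDrm when the passthru is undef or
+// zero, and a VPMASKMOVDrm feeding VBLENDVPS for an arbitrary passthru.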
+let Predicates = [HasAVX] in {
+ defm : maskmov_lowering<"VMASKMOVPS", VR128, v4f32, v4i32, "VBLENDVPS", v4i32>;
+ defm : maskmov_lowering<"VMASKMOVPD", VR128, v2f64, v2i64, "VBLENDVPD", v4i32>;
+ defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8f32, v8i32, "VBLENDVPSY", v8i32>;
+ defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4f64, v4i64, "VBLENDVPDY", v8i32>;
+}
+let Predicates = [HasAVX1Only] in {
+ // Integer (i32/i64) masked load/store is not available without AVX2; use the ps/pd forms instead.
+ defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8i32, v8i32, "VBLENDVPSY", v8i32>;
+ defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4i64, v4i64, "VBLENDVPDY", v8i32>;
+ defm : maskmov_lowering<"VMASKMOVPS", VR128, v4i32, v4i32, "VBLENDVPS", v4i32>;
+ defm : maskmov_lowering<"VMASKMOVPD", VR128, v2i64, v2i64, "VBLENDVPD", v4i32>;
+}
+let Predicates = [HasAVX2] in {
+ defm : maskmov_lowering<"VPMASKMOVDY", VR256, v8i32, v8i32, "VBLENDVPSY", v8i32>;
+ defm : maskmov_lowering<"VPMASKMOVQY", VR256, v4i64, v4i64, "VBLENDVPDY", v8i32>;
+ defm : maskmov_lowering<"VPMASKMOVD", VR128, v4i32, v4i32, "VBLENDVPS", v4i32>;
+ defm : maskmov_lowering<"VPMASKMOVQ", VR128, v2i64, v2i64, "VBLENDVPD", v4i32>;
+}
//===----------------------------------------------------------------------===//
// Variable Bit Shifts
//
@@ -8852,6 +8743,8 @@ let Predicates = [HasAVX2, NoVLX] in {
defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
+ let isCodeGenOnly = 1 in
+ defm VPSRAVD_Int : avx2_var_shift<0x46, "vpsravd", X86vsrav, v4i32, v8i32>;
}
//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
@@ -8869,22 +8762,22 @@ multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
[]>, VEX_4VOp3, VEX_L;
}
-let mayLoad = 1, Constraints
+let mayLoad = 1, hasSideEffects = 0, Constraints
= "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
in {
- defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
- defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
- defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
- defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;
+ defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx128mem, vx256mem>, VEX_W;
+ defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx128mem, vy256mem>, VEX_W;
+ defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx128mem, vy256mem>;
+ defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx64mem, vy128mem>;
let ExeDomain = SSEPackedDouble in {
- defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
- defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
+ defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx128mem, vx256mem>, VEX_W;
+ defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx128mem, vy256mem>, VEX_W;
}
let ExeDomain = SSEPackedSingle in {
- defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
- defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
+ defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx128mem, vy256mem>;
+ defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx64mem, vy128mem>;
}
}
diff --git a/lib/Target/X86/X86InstrSystem.td b/lib/Target/X86/X86InstrSystem.td
index a97d1e5c86d0f..6667bd2aec4af 100644
--- a/lib/Target/X86/X86InstrSystem.td
+++ b/lib/Target/X86/X86InstrSystem.td
@@ -174,11 +174,11 @@ def MOV32rs : I<0x8C, MRMDestReg, (outs GR32:$dst), (ins SEGMENT_REG:$src),
def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>;
-def MOV16ms : I<0x8C, MRMDestMem, (outs i16mem:$dst), (ins SEGMENT_REG:$src),
+def MOV16ms : I<0x8C, MRMDestMem, (outs), (ins i16mem:$dst, SEGMENT_REG:$src),
"mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>, OpSize16;
-def MOV32ms : I<0x8C, MRMDestMem, (outs i32mem:$dst), (ins SEGMENT_REG:$src),
+def MOV32ms : I<0x8C, MRMDestMem, (outs), (ins i32mem:$dst, SEGMENT_REG:$src),
"mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>, OpSize32;
-def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
+def MOV64ms : RI<0x8C, MRMDestMem, (outs), (ins i64mem:$dst, SEGMENT_REG:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>;
def MOV16sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR16:$src),
@@ -248,7 +248,7 @@ def STR32r : I<0x00, MRM1r, (outs GR32:$dst), (ins),
"str{l}\t$dst", [], IIC_STR>, TB, OpSize32;
def STR64r : RI<0x00, MRM1r, (outs GR64:$dst), (ins),
"str{q}\t$dst", [], IIC_STR>, TB;
-def STRm : I<0x00, MRM1m, (outs i16mem:$dst), (ins),
+def STRm : I<0x00, MRM1m, (outs), (ins i16mem:$dst),
"str{w}\t$dst", [], IIC_STR>, TB;
def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src),
@@ -339,9 +339,11 @@ def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lds{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16;
+ "lds{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16,
+ Requires<[Not64BitMode]>;
def LDS32rm : I<0xc5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lds{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32;
+ "lds{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32,
+ Requires<[Not64BitMode]>;
def LSS16rm : I<0xb2, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
"lss{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
@@ -351,9 +353,11 @@ def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
"lss{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def LES16rm : I<0xc4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "les{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16;
+ "les{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16,
+ Requires<[Not64BitMode]>;
def LES32rm : I<0xc4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "les{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32;
+ "les{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32,
+ Requires<[Not64BitMode]>;
def LFS16rm : I<0xb4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
"lfs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
@@ -385,21 +389,21 @@ def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg),
// Descriptor-table support instructions
let SchedRW = [WriteSystem] in {
-def SGDT16m : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
+def SGDT16m : I<0x01, MRM0m, (outs), (ins opaque48mem:$dst),
"sgdt{w}\t$dst", [], IIC_SGDT>, TB, OpSize16, Requires<[Not64BitMode]>;
-def SGDT32m : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
+def SGDT32m : I<0x01, MRM0m, (outs), (ins opaque48mem:$dst),
"sgdt{l}\t$dst", [], IIC_SGDT>, OpSize32, TB, Requires <[Not64BitMode]>;
-def SGDT64m : I<0x01, MRM0m, (outs opaque80mem:$dst), (ins),
+def SGDT64m : I<0x01, MRM0m, (outs), (ins opaque80mem:$dst),
"sgdt{q}\t$dst", [], IIC_SGDT>, TB, Requires <[In64BitMode]>;
-def SIDT16m : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
+def SIDT16m : I<0x01, MRM1m, (outs), (ins opaque48mem:$dst),
"sidt{w}\t$dst", [], IIC_SIDT>, TB, OpSize16, Requires<[Not64BitMode]>;
-def SIDT32m : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
+def SIDT32m : I<0x01, MRM1m, (outs), (ins opaque48mem:$dst),
"sidt{l}\t$dst", []>, OpSize32, TB, Requires <[Not64BitMode]>;
-def SIDT64m : I<0x01, MRM1m, (outs opaque80mem:$dst), (ins),
+def SIDT64m : I<0x01, MRM1m, (outs), (ins opaque80mem:$dst),
"sidt{q}\t$dst", []>, TB, Requires <[In64BitMode]>;
def SLDT16r : I<0x00, MRM0r, (outs GR16:$dst), (ins),
"sldt{w}\t$dst", [], IIC_SLDT>, TB, OpSize16;
-def SLDT16m : I<0x00, MRM0m, (outs i16mem:$dst), (ins),
+def SLDT16m : I<0x00, MRM0m, (outs), (ins i16mem:$dst),
"sldt{w}\t$dst", [], IIC_SLDT>, TB;
def SLDT32r : I<0x00, MRM0r, (outs GR32:$dst), (ins),
"sldt{l}\t$dst", [], IIC_SLDT>, OpSize32, TB;
@@ -408,7 +412,7 @@ def SLDT32r : I<0x00, MRM0r, (outs GR32:$dst), (ins),
// extension.
def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
"sldt{q}\t$dst", [], IIC_SLDT>, TB;
-def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
+def SLDT64m : RI<0x00, MRM0m, (outs), (ins i16mem:$dst),
"sldt{q}\t$dst", [], IIC_SLDT>, TB;
def LGDT16m : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
@@ -450,7 +454,7 @@ def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
"smsw{q}\t$dst", [], IIC_SMSW>, TB;
// For memory operands, there is only a 16-bit form
-def SMSW16m : I<0x01, MRM4m, (outs i16mem:$dst), (ins),
+def SMSW16m : I<0x01, MRM4m, (outs), (ins i16mem:$dst),
"smsw{w}\t$dst", [], IIC_SMSW>, TB;
def LMSW16r : I<0x01, MRM6r, (outs), (ins GR16:$src),
@@ -558,7 +562,7 @@ let usesCustomInserter = 1 in {
[(set GR32:$dst, (int_x86_rdpkru))]>;
}
-let Defs = [EAX, EDX], Uses = [ECX] in
+let Defs = [EAX, EDX], Uses = [ECX] in
def RDPKRUr : I<0x01, MRM_EE, (outs), (ins), "rdpkru", []>, TB;
let Uses = [EAX, ECX, EDX] in
def WRPKRUr : I<0x01, MRM_EF, (outs), (ins), "wrpkru", []>, TB;
diff --git a/lib/Target/X86/X86InstrVMX.td b/lib/Target/X86/X86InstrVMX.td
index 79afe9a654091..2ea27a934b478 100644
--- a/lib/Target/X86/X86InstrVMX.td
+++ b/lib/Target/X86/X86InstrVMX.td
@@ -41,13 +41,13 @@ def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
def VMRESUME : I<0x01, MRM_C3, (outs), (ins), "vmresume", []>, TB;
def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
"vmptrld\t$vmcs", []>, PS;
-def VMPTRSTm : I<0xC7, MRM7m, (outs i64mem:$vmcs), (ins),
+def VMPTRSTm : I<0xC7, MRM7m, (outs), (ins i64mem:$vmcs),
"vmptrst\t$vmcs", []>, TB;
-def VMREAD64rm : I<0x78, MRMDestMem, (outs i64mem:$dst), (ins GR64:$src),
+def VMREAD64rm : I<0x78, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
-def VMREAD32rm : I<0x78, MRMDestMem, (outs i32mem:$dst), (ins GR32:$src),
+def VMREAD32rm : I<0x78, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
diff --git a/lib/Target/X86/X86InstrXOP.td b/lib/Target/X86/X86InstrXOP.td
index 4cb2304e464da..f49917b80f368 100644
--- a/lib/Target/X86/X86InstrXOP.td
+++ b/lib/Target/X86/X86InstrXOP.td
@@ -222,123 +222,199 @@ let ExeDomain = SSEPackedInt in { // SSE integer instructions
defm VPCOMUQ : xopvpcom<0xEF, "uq", X86vpcomu, v2i64>;
}
-// Instruction where either second or third source can be memory
-multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
- def rr : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, VR128:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst, (Int VR128:$src1, VR128:$src2, VR128:$src3))]>,
- XOP_4V, VEX_I8IMM;
- def rm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i128mem:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst,
- (Int VR128:$src1, VR128:$src2,
- (bitconvert (loadv2i64 addr:$src3))))]>,
- XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
- def mr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, VR128:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst,
- (Int VR128:$src1, (bitconvert (loadv2i64 addr:$src2)),
- VR128:$src3))]>,
- XOP_4V, VEX_I8IMM;
+multiclass xop4op<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType vt128> {
+ def rrr : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2),
+ (vt128 VR128:$src3))))]>,
+ XOP_4V, VEX_I8IMM;
+ def rrm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2),
+ (vt128 (bitconvert (loadv2i64 addr:$src3))))))]>,
+ XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
+ def rmr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (vt128 (OpNode (vt128 VR128:$src1), (vt128 (bitconvert (loadv2i64 addr:$src2))),
+ (vt128 VR128:$src3))))]>,
+ XOP_4V, VEX_I8IMM;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rrr_REV : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
}
let ExeDomain = SSEPackedInt in {
- defm VPPERM : xop4op<0xA3, "vpperm", int_x86_xop_vpperm>;
- defm VPCMOV : xop4op<0xA2, "vpcmov", int_x86_xop_vpcmov>;
+ defm VPPERM : xop4op<0xA3, "vpperm", X86vpperm, v16i8>;
}
-multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
- def rrY : IXOPi8<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, VR256:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR256:$dst, (Int VR256:$src1, VR256:$src2, VR256:$src3))]>,
- XOP_4V, VEX_I8IMM, VEX_L;
- def rmY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, i256mem:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR256:$dst,
- (Int VR256:$src1, VR256:$src2,
- (bitconvert (loadv4i64 addr:$src3))))]>,
- XOP_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
- def mrY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, f256mem:$src2, VR256:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR256:$dst,
- (Int VR256:$src1, (bitconvert (loadv4i64 addr:$src2)),
- VR256:$src3))]>,
- XOP_4V, VEX_I8IMM, VEX_L;
+// Instructions where either the second or the third source can be a memory operand
+multiclass xop4op_int<bits<8> opc, string OpcodeStr,
+ Intrinsic Int128, Intrinsic Int256> {
+ // 128-bit Instruction
+ def rrr : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst, (Int128 VR128:$src1, VR128:$src2, VR128:$src3))]>,
+ XOP_4V, VEX_I8IMM;
+ def rrm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, VR128:$src2,
+ (bitconvert (loadv2i64 addr:$src3))))]>,
+ XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
+ def rmr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, (bitconvert (loadv2i64 addr:$src2)),
+ VR128:$src3))]>,
+ XOP_4V, VEX_I8IMM;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rrr_REV : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
+
+ // 256-bit Instruction
+ def rrrY : IXOPi8<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst, (Int256 VR256:$src1, VR256:$src2, VR256:$src3))]>,
+ XOP_4V, VEX_I8IMM, VEX_L;
+ def rrmY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, i256mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, VR256:$src2,
+ (bitconvert (loadv4i64 addr:$src3))))]>,
+ XOP_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
+ def rmrY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f256mem:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2)),
+ VR256:$src3))]>,
+ XOP_4V, VEX_I8IMM, VEX_L;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rrrY_REV : IXOPi8<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, XOP_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
}
-let ExeDomain = SSEPackedInt in
- defm VPCMOV : xop4op256<0xA2, "vpcmov", int_x86_xop_vpcmov_256>;
+let ExeDomain = SSEPackedInt in {
+ defm VPCMOV : xop4op_int<0xA2, "vpcmov",
+ int_x86_xop_vpcmov, int_x86_xop_vpcmov_256>;
+}
let Predicates = [HasXOP] in {
def : Pat<(v2i64 (or (and VR128:$src3, VR128:$src1),
(X86andnp VR128:$src3, VR128:$src2))),
- (VPCMOVrr VR128:$src1, VR128:$src2, VR128:$src3)>;
+ (VPCMOVrrr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(v4i64 (or (and VR256:$src3, VR256:$src1),
(X86andnp VR256:$src3, VR256:$src2))),
- (VPCMOVrrY VR256:$src1, VR256:$src2, VR256:$src3)>;
+ (VPCMOVrrrY VR256:$src1, VR256:$src2, VR256:$src3)>;
}
-multiclass xop5op<bits<8> opc, string OpcodeStr, Intrinsic Int128,
- Intrinsic Int256, PatFrag ld_128, PatFrag ld_256> {
+multiclass xop5op<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType vt128, ValueType vt256,
+ ValueType id128, ValueType id256,
+ PatFrag ld_128, PatFrag ld_256> {
def rr : IXOP5<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR128:$dst,
- (Int128 VR128:$src1, VR128:$src2, VR128:$src3, imm:$src4))]>;
+ (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2),
+ (id128 VR128:$src3), (i8 imm:$src4))))]>;
def rm : IXOP5<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, f128mem:$src3, u8imm:$src4),
+ (ins VR128:$src1, VR128:$src2, i128mem:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR128:$dst,
- (Int128 VR128:$src1, VR128:$src2, (ld_128 addr:$src3), imm:$src4))]>,
+ (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2),
+ (id128 (bitconvert (loadv2i64 addr:$src3))),
+ (i8 imm:$src4))))]>,
VEX_W, MemOp4;
def mr : IXOP5<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2, VR128:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR128:$dst,
- (Int128 VR128:$src1, (ld_128 addr:$src2), VR128:$src3, imm:$src4))]>;
+ (vt128 (OpNode (vt128 VR128:$src1),
+ (vt128 (bitconvert (ld_128 addr:$src2))),
+ (id128 VR128:$src3), (i8 imm:$src4))))]>;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rr_REV : IXOP5<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3, u8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ []>, VEX_W, MemOp4;
+
def rrY : IXOP5<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, VR256:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR256:$dst,
- (Int256 VR256:$src1, VR256:$src2, VR256:$src3, imm:$src4))]>, VEX_L;
+ (vt256 (OpNode (vt256 VR256:$src1), (vt256 VR256:$src2),
+ (id256 VR256:$src3), (i8 imm:$src4))))]>, VEX_L;
def rmY : IXOP5<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, f256mem:$src3, u8imm:$src4),
+ (ins VR256:$src1, VR256:$src2, i256mem:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR256:$dst,
- (Int256 VR256:$src1, VR256:$src2, (ld_256 addr:$src3), imm:$src4))]>,
- VEX_W, MemOp4, VEX_L;
+ (vt256 (OpNode (vt256 VR256:$src1), (vt256 VR256:$src2),
+ (id256 (bitconvert (loadv4i64 addr:$src3))),
+ (i8 imm:$src4))))]>, VEX_W, MemOp4, VEX_L;
def mrY : IXOP5<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, VR256:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR256:$dst,
- (Int256 VR256:$src1, (ld_256 addr:$src2), VR256:$src3, imm:$src4))]>,
- VEX_L;
+ (vt256 (OpNode (vt256 VR256:$src1),
+ (vt256 (bitconvert (ld_256 addr:$src2))),
+ (id256 VR256:$src3), (i8 imm:$src4))))]>, VEX_L;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rrY_REV : IXOP5<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3, u8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ []>, VEX_W, MemOp4, VEX_L;
}
let ExeDomain = SSEPackedDouble in
- defm VPERMIL2PD : xop5op<0x49, "vpermil2pd", int_x86_xop_vpermil2pd,
- int_x86_xop_vpermil2pd_256, loadv2f64, loadv4f64>;
+ defm VPERMIL2PD : xop5op<0x49, "vpermil2pd", X86vpermil2, v2f64, v4f64,
+ v2i64, v4i64, loadv2f64, loadv4f64>;
let ExeDomain = SSEPackedSingle in
- defm VPERMIL2PS : xop5op<0x48, "vpermil2ps", int_x86_xop_vpermil2ps,
- int_x86_xop_vpermil2ps_256, loadv4f32, loadv8f32>;
+ defm VPERMIL2PS : xop5op<0x48, "vpermil2ps", X86vpermil2, v4f32, v8f32,
+ v4i32, v8i32, loadv4f32, loadv8f32>;
diff --git a/lib/Target/X86/X86IntrinsicsInfo.h b/lib/Target/X86/X86IntrinsicsInfo.h
index b525d5eb60a7c..b647d11e38663 100644
--- a/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/lib/Target/X86/X86IntrinsicsInfo.h
@@ -14,31 +14,36 @@
#ifndef LLVM_LIB_TARGET_X86_X86INTRINSICSINFO_H
#define LLVM_LIB_TARGET_X86_X86INTRINSICSINFO_H
+#include "X86ISelLowering.h"
+#include "X86InstrInfo.h"
+
namespace llvm {
-enum IntrinsicType {
+enum IntrinsicType : uint16_t {
INTR_NO_TYPE,
GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST, ADX, FPCLASS, FPCLASSS,
INTR_TYPE_1OP, INTR_TYPE_2OP, INTR_TYPE_2OP_IMM8, INTR_TYPE_3OP, INTR_TYPE_4OP,
- CMP_MASK, CMP_MASK_CC,CMP_MASK_SCALAR_CC, VSHIFT, VSHIFT_MASK, COMI, COMI_RM,
+ CMP_MASK, CMP_MASK_CC, CMP_MASK_SCALAR_CC, VSHIFT, COMI, COMI_RM,
INTR_TYPE_1OP_MASK, INTR_TYPE_1OP_MASK_RM,
INTR_TYPE_2OP_MASK, INTR_TYPE_2OP_MASK_RM, INTR_TYPE_2OP_IMM8_MASK,
INTR_TYPE_3OP_MASK, INTR_TYPE_3OP_MASK_RM, INTR_TYPE_3OP_IMM8_MASK,
- FMA_OP_MASK, FMA_OP_MASKZ, FMA_OP_MASK3, VPERM_3OP_MASK,
- VPERM_3OP_MASKZ, INTR_TYPE_SCALAR_MASK,
+ FMA_OP_MASK, FMA_OP_MASKZ, FMA_OP_MASK3,
+ FMA_OP_SCALAR_MASK, FMA_OP_SCALAR_MASKZ, FMA_OP_SCALAR_MASK3,
+ VPERM_2OP_MASK, VPERM_3OP_MASK, VPERM_3OP_MASKZ, INTR_TYPE_SCALAR_MASK,
INTR_TYPE_SCALAR_MASK_RM, INTR_TYPE_3OP_SCALAR_MASK_RM,
- COMPRESS_EXPAND_IN_REG, COMPRESS_TO_MEM, BRCST_SUBVEC_TO_VEC,
+ COMPRESS_EXPAND_IN_REG, COMPRESS_TO_MEM, BRCST_SUBVEC_TO_VEC, BRCST32x2_TO_VEC,
TRUNCATE_TO_MEM_VI8, TRUNCATE_TO_MEM_VI16, TRUNCATE_TO_MEM_VI32,
- EXPAND_FROM_MEM, LOADA, LOADU, BLEND, INSERT_SUBVEC,
- TERLOG_OP_MASK, TERLOG_OP_MASKZ, BROADCASTM, KUNPCK, CONVERT_MASK_TO_VEC, CONVERT_TO_MASK
+ EXPAND_FROM_MEM, INSERT_SUBVEC,
+ TERLOG_OP_MASK, TERLOG_OP_MASKZ, BROADCASTM, KUNPCK, FIXUPIMM, FIXUPIMM_MASKZ, FIXUPIMMS,
+ FIXUPIMMS_MASKZ, CONVERT_MASK_TO_VEC, CONVERT_TO_MASK
};
struct IntrinsicData {
- unsigned Id;
+ uint16_t Id;
IntrinsicType Type;
- unsigned Opc0;
- unsigned Opc1;
+ uint16_t Opc0;
+ uint16_t Opc1;
bool operator<(const IntrinsicData &RHS) const {
return Id < RHS.Id;
@@ -61,6 +66,14 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(addcarryx_u32, ADX, X86ISD::ADC, 0),
X86_INTRINSIC_DATA(addcarryx_u64, ADX, X86ISD::ADC, 0),
+ X86_INTRINSIC_DATA(avx512_gather_dpd_512, GATHER, X86::VGATHERDPDZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_dpi_512, GATHER, X86::VPGATHERDDZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_dpq_512, GATHER, X86::VPGATHERDQZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_dps_512, GATHER, X86::VGATHERDPSZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_qpd_512, GATHER, X86::VGATHERQPDZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_qpi_512, GATHER, X86::VPGATHERQDZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_qpq_512, GATHER, X86::VPGATHERQQZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_qps_512, GATHER, X86::VGATHERQPSZrm, 0),
X86_INTRINSIC_DATA(avx512_gather3div2_df, GATHER, X86::VGATHERQPDZ128rm, 0),
X86_INTRINSIC_DATA(avx512_gather3div2_di, GATHER, X86::VPGATHERQQZ128rm, 0),
X86_INTRINSIC_DATA(avx512_gather3div4_df, GATHER, X86::VGATHERQPDZ256rm, 0),
@@ -77,14 +90,6 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(avx512_gather3siv4_si, GATHER, X86::VPGATHERDDZ128rm, 0),
X86_INTRINSIC_DATA(avx512_gather3siv8_sf, GATHER, X86::VGATHERDPSZ256rm, 0),
X86_INTRINSIC_DATA(avx512_gather3siv8_si, GATHER, X86::VPGATHERDDZ256rm, 0),
- X86_INTRINSIC_DATA(avx512_gather_dpd_512, GATHER, X86::VGATHERDPDZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_dpi_512, GATHER, X86::VPGATHERDDZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_dpq_512, GATHER, X86::VPGATHERDQZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_dps_512, GATHER, X86::VGATHERDPSZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_qpd_512, GATHER, X86::VGATHERQPDZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_qpi_512, GATHER, X86::VPGATHERQDZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_qpq_512, GATHER, X86::VPGATHERQQZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_qps_512, GATHER, X86::VGATHERQPSZrm, 0),
X86_INTRINSIC_DATA(avx512_gatherpf_dpd_512, PREFETCH,
X86::VGATHERPF0DPDm, X86::VGATHERPF1DPDm),
@@ -143,18 +148,6 @@ static const IntrinsicData IntrinsicsWithChain[] = {
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_q_512,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_pd_128, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_pd_256, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_pd_512, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_ps_128, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_ps_256, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_ps_512, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_pd_128, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_pd_256, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_pd_512, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_ps_128, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_ps_256, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_ps_512, LOADU, ISD::DELETED_NODE, 0),
X86_INTRINSIC_DATA(avx512_mask_pmov_db_mem_128, TRUNCATE_TO_MEM_VI8,
X86ISD::VTRUNC, 0),
X86_INTRINSIC_DATA(avx512_mask_pmov_db_mem_256, TRUNCATE_TO_MEM_VI8,
@@ -223,7 +216,6 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(avx512_scattersiv4_si, SCATTER, X86::VPSCATTERDDZ128mr, 0),
X86_INTRINSIC_DATA(avx512_scattersiv8_sf, SCATTER, X86::VSCATTERDPSZ256mr, 0),
X86_INTRINSIC_DATA(avx512_scattersiv8_si, SCATTER, X86::VPSCATTERDDZ256mr, 0),
-
X86_INTRINSIC_DATA(rdpmc, RDPMC, X86ISD::RDPMC_DAG, 0),
X86_INTRINSIC_DATA(rdrand_16, RDRAND, X86ISD::RDRAND, 0),
X86_INTRINSIC_DATA(rdrand_32, RDRAND, X86ISD::RDRAND, 0),
@@ -242,7 +234,7 @@ static const IntrinsicData IntrinsicsWithChain[] = {
/*
* Find Intrinsic data by intrinsic ID
*/
-static const IntrinsicData* getIntrinsicWithChain(unsigned IntNo) {
+static const IntrinsicData* getIntrinsicWithChain(uint16_t IntNo) {
IntrinsicData IntrinsicToFind = {IntNo, INTR_NO_TYPE, 0, 0 };
const IntrinsicData *Data = std::lower_bound(std::begin(IntrinsicsWithChain),
@@ -258,49 +250,51 @@ static const IntrinsicData* getIntrinsicWithChain(unsigned IntNo) {
* the alphabetical order.
*/
static const IntrinsicData IntrinsicsWithoutChain[] = {
+ X86_INTRINSIC_DATA(avx_hadd_pd_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
+ X86_INTRINSIC_DATA(avx_hadd_ps_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
+ X86_INTRINSIC_DATA(avx_hsub_pd_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
+ X86_INTRINSIC_DATA(avx_hsub_ps_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
+ X86_INTRINSIC_DATA(avx_max_pd_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(avx_max_ps_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(avx_min_pd_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(avx_min_ps_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(avx_movmsk_pd_256, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
+ X86_INTRINSIC_DATA(avx_movmsk_ps_256, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
+ X86_INTRINSIC_DATA(avx_rcp_ps_256, INTR_TYPE_1OP, X86ISD::FRCP, 0),
+ X86_INTRINSIC_DATA(avx_rsqrt_ps_256, INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
+ X86_INTRINSIC_DATA(avx_sqrt_pd_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
+ X86_INTRINSIC_DATA(avx_sqrt_ps_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
+ X86_INTRINSIC_DATA(avx_vperm2f128_pd_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(avx_vperm2f128_ps_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(avx_vperm2f128_si_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(avx_vpermilvar_pd, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
+ X86_INTRINSIC_DATA(avx_vpermilvar_pd_256, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
+ X86_INTRINSIC_DATA(avx_vpermilvar_ps, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
+ X86_INTRINSIC_DATA(avx_vpermilvar_ps_256, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
+ X86_INTRINSIC_DATA(avx2_pabs_b, INTR_TYPE_1OP, X86ISD::ABS, 0),
+ X86_INTRINSIC_DATA(avx2_pabs_d, INTR_TYPE_1OP, X86ISD::ABS, 0),
+ X86_INTRINSIC_DATA(avx2_pabs_w, INTR_TYPE_1OP, X86ISD::ABS, 0),
X86_INTRINSIC_DATA(avx2_packssdw, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(avx2_packsswb, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(avx2_packusdw, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
X86_INTRINSIC_DATA(avx2_packuswb, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
+ X86_INTRINSIC_DATA(avx2_padds_b, INTR_TYPE_2OP, X86ISD::ADDS, 0),
+ X86_INTRINSIC_DATA(avx2_padds_w, INTR_TYPE_2OP, X86ISD::ADDS, 0),
+ X86_INTRINSIC_DATA(avx2_paddus_b, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
+ X86_INTRINSIC_DATA(avx2_paddus_w, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
X86_INTRINSIC_DATA(avx2_pavg_b, INTR_TYPE_2OP, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx2_pavg_w, INTR_TYPE_2OP, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx2_phadd_d, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(avx2_phadd_w, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(avx2_phsub_d, INTR_TYPE_2OP, X86ISD::HSUB, 0),
X86_INTRINSIC_DATA(avx2_phsub_w, INTR_TYPE_2OP, X86ISD::HSUB, 0),
- X86_INTRINSIC_DATA(avx2_pmaxs_b, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxs_d, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxs_w, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxu_b, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxu_d, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxu_w, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmins_b, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(avx2_pmins_d, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(avx2_pmins_w, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(avx2_pminu_b, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(avx2_pminu_d, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(avx2_pminu_w, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxbd, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxbq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxbw, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxdq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxwd, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxwq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxbd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxbq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxbw, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxdq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxwd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxwq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovmskb, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
X86_INTRINSIC_DATA(avx2_pmul_dq, INTR_TYPE_2OP, X86ISD::PMULDQ, 0),
X86_INTRINSIC_DATA(avx2_pmulh_w, INTR_TYPE_2OP, ISD::MULHS, 0),
X86_INTRINSIC_DATA(avx2_pmulhu_w, INTR_TYPE_2OP, ISD::MULHU, 0),
X86_INTRINSIC_DATA(avx2_pmulu_dq, INTR_TYPE_2OP, X86ISD::PMULUDQ, 0),
X86_INTRINSIC_DATA(avx2_psad_bw, INTR_TYPE_2OP, X86ISD::PSADBW, 0),
X86_INTRINSIC_DATA(avx2_pshuf_b, INTR_TYPE_2OP, X86ISD::PSHUFB, 0),
- X86_INTRINSIC_DATA(avx2_psign_b, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
- X86_INTRINSIC_DATA(avx2_psign_d, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
- X86_INTRINSIC_DATA(avx2_psign_w, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
X86_INTRINSIC_DATA(avx2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
@@ -315,8 +309,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx2_psra_w, INTR_TYPE_2OP, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx2_psrai_d, VSHIFT, X86ISD::VSRAI, 0),
X86_INTRINSIC_DATA(avx2_psrai_w, VSHIFT, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx2_psrav_d, INTR_TYPE_2OP, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx2_psrav_d_256, INTR_TYPE_2OP, ISD::SRA, 0),
+ X86_INTRINSIC_DATA(avx2_psrav_d, INTR_TYPE_2OP, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx2_psrav_d_256, INTR_TYPE_2OP, X86ISD::VSRAV, 0),
X86_INTRINSIC_DATA(avx2_psrl_d, INTR_TYPE_2OP, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx2_psrl_q, INTR_TYPE_2OP, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx2_psrl_w, INTR_TYPE_2OP, X86ISD::VSRL, 0),
@@ -327,6 +321,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx2_psrlv_d_256, INTR_TYPE_2OP, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx2_psrlv_q, INTR_TYPE_2OP, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx2_psrlv_q_256, INTR_TYPE_2OP, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx2_psubs_b, INTR_TYPE_2OP, X86ISD::SUBS, 0),
+ X86_INTRINSIC_DATA(avx2_psubs_w, INTR_TYPE_2OP, X86ISD::SUBS, 0),
X86_INTRINSIC_DATA(avx2_psubus_b, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(avx2_psubus_w, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(avx2_vperm2i128, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
@@ -380,50 +376,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_kunpck_bw, KUNPCK, ISD::CONCAT_VECTORS, 0),
X86_INTRINSIC_DATA(avx512_kunpck_dq, KUNPCK, ISD::CONCAT_VECTORS, 0),
X86_INTRINSIC_DATA(avx512_kunpck_wd, KUNPCK, ISD::CONCAT_VECTORS, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_128, FMA_OP_MASK3, X86ISD::FMADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_256, FMA_OP_MASK3, X86ISD::FMADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_512, FMA_OP_MASK3, X86ISD::FMADD,
- X86ISD::FMADD_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_128, FMA_OP_MASK3, X86ISD::FMADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_256, FMA_OP_MASK3, X86ISD::FMADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_512, FMA_OP_MASK3, X86ISD::FMADD,
- X86ISD::FMADD_RND),
-
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_128, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_256, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_512, FMA_OP_MASK3, X86ISD::FMADDSUB,
- X86ISD::FMADDSUB_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_128, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_256, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_512, FMA_OP_MASK3, X86ISD::FMADDSUB,
- X86ISD::FMADDSUB_RND),
-
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_128, FMA_OP_MASK3, X86ISD::FMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_256, FMA_OP_MASK3, X86ISD::FMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_512, FMA_OP_MASK3, X86ISD::FMSUB,
- X86ISD::FMSUB_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_128, FMA_OP_MASK3, X86ISD::FMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_256, FMA_OP_MASK3, X86ISD::FMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_512, FMA_OP_MASK3, X86ISD::FMSUB,
- X86ISD::FMSUB_RND),
-
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_128, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_256, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_512, FMA_OP_MASK3, X86ISD::FMSUBADD,
- X86ISD::FMSUBADD_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_128, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_256, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_512, FMA_OP_MASK3, X86ISD::FMSUBADD,
- X86ISD::FMSUBADD_RND),
-
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_128, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_256, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_512, FMA_OP_MASK3, X86ISD::FNMSUB,
- X86ISD::FNMSUB_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_128, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_256, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_512, FMA_OP_MASK3, X86ISD::FNMSUB,
- X86ISD::FNMSUB_RND),
X86_INTRINSIC_DATA(avx512_mask_add_pd_128, INTR_TYPE_2OP_MASK, ISD::FADD, 0),
X86_INTRINSIC_DATA(avx512_mask_add_pd_256, INTR_TYPE_2OP_MASK, ISD::FADD, 0),
@@ -449,38 +401,10 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_andn_ps_128, INTR_TYPE_2OP_MASK, X86ISD::FANDN, 0),
X86_INTRINSIC_DATA(avx512_mask_andn_ps_256, INTR_TYPE_2OP_MASK, X86ISD::FANDN, 0),
X86_INTRINSIC_DATA(avx512_mask_andn_ps_512, INTR_TYPE_2OP_MASK, X86ISD::FANDN, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_b_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_b_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_b_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_d_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_d_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_d_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_pd_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_pd_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_pd_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_ps_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_ps_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_ps_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_q_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_q_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_q_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_w_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_w_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_w_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_sd_pd_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_sd_pd_512, INTR_TYPE_1OP_MASK,
+ X86_INTRINSIC_DATA(avx512_mask_broadcastf32x2_256, BRCST32x2_TO_VEC,
X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_ss_ps_128, INTR_TYPE_1OP_MASK,
+ X86_INTRINSIC_DATA(avx512_mask_broadcastf32x2_512, BRCST32x2_TO_VEC,
X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_ss_ps_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_ss_ps_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcastf32x2_256, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcastf32x2_512, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcastf32x4_256, BRCST_SUBVEC_TO_VEC,
X86ISD::SHUF128, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcastf32x4_512, BRCST_SUBVEC_TO_VEC,
@@ -493,12 +417,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::SHUF128, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcastf64x4_512, BRCST_SUBVEC_TO_VEC,
X86ISD::SHUF128, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_128, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_256, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_512, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_128, BRCST32x2_TO_VEC,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_256, BRCST32x2_TO_VEC,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_512, BRCST32x2_TO_VEC,
+ X86ISD::VBROADCAST, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcasti32x4_256, BRCST_SUBVEC_TO_VEC,
X86ISD::SHUF128, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcasti32x4_512, BRCST_SUBVEC_TO_VEC,
@@ -773,6 +697,14 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_q_512, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_pd_128, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_pd_256, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_pd_512, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_ps_128, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_ps_256, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_ps_512, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_sd, FIXUPIMMS, X86ISD::VFIXUPIMMS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_ss, FIXUPIMMS, X86ISD::VFIXUPIMMS, 0),
X86_INTRINSIC_DATA(avx512_mask_fpclass_pd_128, FPCLASS, X86ISD::VFPCLASS, 0),
X86_INTRINSIC_DATA(avx512_mask_fpclass_pd_256, FPCLASS, X86ISD::VFPCLASS, 0),
X86_INTRINSIC_DATA(avx512_mask_fpclass_pd_512, FPCLASS, X86ISD::VFPCLASS, 0),
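
For readers skimming the hunks: each X86_INTRINSIC_DATA row binds one intrinsic ID to a lowering kind (the INTR_TYPE_*/FMA_OP_*/CMP_MASK tags) and up to two SelectionDAG opcodes, where the second slot usually carries a rounding-mode variant and is 0 otherwise. A minimal sketch of the row shape, with placeholder enum values so it compiles standalone (field names follow this header; the enumerators below are illustrative, not the real definitions):

#include <cstdint>

// Illustrative stand-ins for the real IntrinsicType and opcode enums.
enum IntrinsicType : uint16_t { INTR_NO_TYPE, INTR_TYPE_2OP_MASK };

struct IntrinsicData {
  uint16_t Id;        // Intrinsic::x86_* enum value
  IntrinsicType Type; // tells ISel how to expand the call
  uint16_t Opc0;      // primary ISD/X86ISD opcode
  uint16_t Opc1;      // secondary opcode (e.g. a *_RND form) or 0
};

// The X86_INTRINSIC_DATA(id, type, op0, op1) macro used throughout the table
// expands to a braced initializer of this struct, prefixing id with
// Intrinsic::x86_.
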
@@ -873,28 +805,10 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::FMIN, X86ISD::FMIN_RND),
X86_INTRINSIC_DATA(avx512_mask_min_ss_round, INTR_TYPE_SCALAR_MASK_RM,
X86ISD::FMIN, X86ISD::FMIN_RND),
- X86_INTRINSIC_DATA(avx512_mask_movddup_128, INTR_TYPE_1OP_MASK,
- X86ISD::MOVDDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movddup_256, INTR_TYPE_1OP_MASK,
- X86ISD::MOVDDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movddup_512, INTR_TYPE_1OP_MASK,
- X86ISD::MOVDDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_move_sd, INTR_TYPE_SCALAR_MASK,
+ X86_INTRINSIC_DATA(avx512_mask_move_sd, INTR_TYPE_SCALAR_MASK,
X86ISD::MOVSD, 0),
- X86_INTRINSIC_DATA(avx512_mask_move_ss, INTR_TYPE_SCALAR_MASK,
+ X86_INTRINSIC_DATA(avx512_mask_move_ss, INTR_TYPE_SCALAR_MASK,
X86ISD::MOVSS, 0),
- X86_INTRINSIC_DATA(avx512_mask_movshdup_128, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSHDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movshdup_256, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSHDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movshdup_512, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSHDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movsldup_128, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSLDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movsldup_256, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSLDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movsldup_512, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSLDUP, 0),
X86_INTRINSIC_DATA(avx512_mask_mul_pd_128, INTR_TYPE_2OP_MASK, ISD::FMUL, 0),
X86_INTRINSIC_DATA(avx512_mask_mul_pd_256, INTR_TYPE_2OP_MASK, ISD::FMUL, 0),
X86_INTRINSIC_DATA(avx512_mask_mul_pd_512, INTR_TYPE_2OP_MASK, ISD::FMUL,
@@ -961,54 +875,64 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_paddus_w_128, INTR_TYPE_2OP_MASK, X86ISD::ADDUS, 0),
X86_INTRINSIC_DATA(avx512_mask_paddus_w_256, INTR_TYPE_2OP_MASK, X86ISD::ADDUS, 0),
X86_INTRINSIC_DATA(avx512_mask_paddus_w_512, INTR_TYPE_2OP_MASK, X86ISD::ADDUS, 0),
- X86_INTRINSIC_DATA(avx512_mask_palignr_128, INTR_TYPE_3OP_IMM8_MASK,
- X86ISD::PALIGNR, 0),
- X86_INTRINSIC_DATA(avx512_mask_palignr_256, INTR_TYPE_3OP_IMM8_MASK,
- X86ISD::PALIGNR, 0),
- X86_INTRINSIC_DATA(avx512_mask_palignr_512, INTR_TYPE_3OP_IMM8_MASK,
- X86ISD::PALIGNR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_d_128, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_d_256, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_d_512, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_q_128, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_q_256, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_q_512, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_d_128, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_d_256, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_d_512, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_q_128, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_q_256, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_q_512, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_b_128, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_b_256, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_b_512, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_w_128, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_w_256, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_w_512, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_128, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_256, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_512, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_d_128, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_d_256, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_d_512, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_q_128, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_q_256, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_q_512, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_w_128, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_w_256, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_w_512, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_b_128, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_b_256, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_b_512, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_d_128, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_d_256, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_d_512, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_q_128, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_q_256, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_q_512, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_128, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_256, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_512, CMP_MASK, X86ISD::PCMPGTM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_b_gpr_128, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_b_gpr_256, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_b_gpr_512, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_d_gpr_128, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_d_gpr_256, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_d_gpr_512, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_q_gpr_128, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_q_gpr_256, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_q_gpr_512, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_w_gpr_128, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_w_gpr_256, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_w_gpr_512, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_df_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_df_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_di_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_di_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_hi_128, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_hi_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_hi_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_qi_128, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_qi_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_qi_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_sf_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_sf_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_si_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_si_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
X86_INTRINSIC_DATA(avx512_mask_pmaddubs_w_128, INTR_TYPE_2OP_MASK,
X86ISD::VPMADDUBSW, 0),
X86_INTRINSIC_DATA(avx512_mask_pmaddubs_w_256, INTR_TYPE_2OP_MASK,
@@ -1273,36 +1197,36 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_pmull_w_128, INTR_TYPE_2OP_MASK, ISD::MUL, 0),
X86_INTRINSIC_DATA(avx512_mask_pmull_w_256, INTR_TYPE_2OP_MASK, ISD::MUL, 0),
X86_INTRINSIC_DATA(avx512_mask_pmull_w_512, INTR_TYPE_2OP_MASK, ISD::MUL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_128, INTR_TYPE_2OP_MASK,
+ X86ISD::MULTISHIFT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_256, INTR_TYPE_2OP_MASK,
+ X86ISD::MULTISHIFT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_512, INTR_TYPE_2OP_MASK,
+ X86ISD::MULTISHIFT, 0),
X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_128, INTR_TYPE_2OP_MASK,
X86ISD::PMULUDQ, 0),
X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_256, INTR_TYPE_2OP_MASK,
X86ISD::PMULUDQ, 0),
X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_512, INTR_TYPE_2OP_MASK,
X86ISD::PMULUDQ, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_d_128, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_d_256, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_d_512, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_q_128, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_q_256, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_q_512, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_d_128, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_d_256, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_d_512, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_q_128, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_q_256, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_q_512, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_d_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_d_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_d_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_q_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_q_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_q_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_d_128, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_d_256, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_d_512, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_q_128, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_q_256, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_q_512, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_d_128, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_d_256, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_d_512, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_q_128, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_q_256, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_q_512, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_d_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_d_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_d_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_q_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_q_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_q_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
X86_INTRINSIC_DATA(avx512_mask_prorv_d_128, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
X86_INTRINSIC_DATA(avx512_mask_prorv_d_256, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
X86_INTRINSIC_DATA(avx512_mask_prorv_d_512, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
@@ -1315,44 +1239,26 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::PSHUFB, 0),
X86_INTRINSIC_DATA(avx512_mask_pshuf_b_512, INTR_TYPE_2OP_MASK,
X86ISD::PSHUFB, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshuf_d_128, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFD, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshuf_d_256, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFD, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshuf_d_512, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFD, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufh_w_128, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFHW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufh_w_256, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFHW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufh_w_512, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFHW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufl_w_128, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFLW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufl_w_256, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFLW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufl_w_512, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFLW, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_d, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_d_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_d_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_di_128, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_di_256, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_di_512, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_di_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_di_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_di_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_q, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_qi_128, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_qi_256, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_qi_512, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_qi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_qi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_qi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_w_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_w_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_w_512, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_wi_128, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_wi_256, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_wi_512, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pslli_d, VSHIFT_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pslli_q, VSHIFT_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_wi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_wi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_wi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psllv_d, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psllv_q, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv16_hi, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv2_di, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv32hi, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
@@ -1360,57 +1266,53 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_psllv4_si, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv8_hi, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv8_si, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psllv_d, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psllv_q, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_d, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_d_128, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_d_256, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_di_128, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_di_256, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_di_512, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_di_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_di_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_di_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_q, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_qi_128, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_qi_256, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_qi_512, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_qi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_qi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_qi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_w_128, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_w_256, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_w_512, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_wi_128, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_wi_256, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_wi_512, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrai_d, VSHIFT_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrai_q, VSHIFT_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav16_hi, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav32_hi, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav4_si, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav8_hi, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav8_si, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav_d, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav_q, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav_q_128, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav_q_256, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_wi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_wi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_wi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_d, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_q, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav16_hi, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav32_hi, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav4_si, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav8_hi, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav8_si, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_d, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_d_128, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_d_256, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_di_128, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_di_256, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_di_512, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_di_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_di_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_di_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_q, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_qi_128, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_qi_256, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_qi_512, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_qi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_qi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_qi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_w_128, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_w_256, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_w_512, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_wi_128, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_wi_256, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_wi_512, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrli_d, VSHIFT_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrli_q, VSHIFT_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_wi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_wi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_wi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrlv_d, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrlv_q, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv16_hi, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv2_di, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv32hi, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
@@ -1418,8 +1320,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_psrlv4_si, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv8_hi, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv8_si, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrlv_d, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrlv_q, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psub_b_128, INTR_TYPE_2OP_MASK, ISD::SUB, 0),
X86_INTRINSIC_DATA(avx512_mask_psub_b_256, INTR_TYPE_2OP_MASK, ISD::SUB, 0),
X86_INTRINSIC_DATA(avx512_mask_psub_b_512, INTR_TYPE_2OP_MASK, ISD::SUB, 0),
@@ -1456,60 +1356,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPTERNLOG, 0),
X86_INTRINSIC_DATA(avx512_mask_pternlog_q_512, TERLOG_OP_MASK,
X86ISD::VPTERNLOG, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhb_w_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhb_w_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhb_w_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhd_q_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhd_q_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhd_q_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhqd_q_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhqd_q_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhqd_q_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhw_d_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhw_d_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhw_d_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklb_w_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklb_w_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklb_w_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckld_q_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckld_q_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckld_q_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklqd_q_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklqd_q_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklqd_q_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklw_d_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklw_d_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklw_d_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_d_128, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_d_256, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_d_512, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_q_128, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_q_256, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_q_512, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
X86_INTRINSIC_DATA(avx512_mask_range_pd_128, INTR_TYPE_3OP_MASK_RM, X86ISD::VRANGE, 0),
X86_INTRINSIC_DATA(avx512_mask_range_pd_256, INTR_TYPE_3OP_MASK_RM, X86ISD::VRANGE, 0),
X86_INTRINSIC_DATA(avx512_mask_range_pd_512, INTR_TYPE_3OP_MASK_RM, X86ISD::VRANGE, 0),
@@ -1549,9 +1395,9 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_scalef_ps_512, INTR_TYPE_2OP_MASK_RM,
X86ISD::SCALEF, 0),
X86_INTRINSIC_DATA(avx512_mask_scalef_sd, INTR_TYPE_SCALAR_MASK_RM,
- X86ISD::SCALEF, 0),
+ X86ISD::SCALEFS, 0),
X86_INTRINSIC_DATA(avx512_mask_scalef_ss, INTR_TYPE_SCALAR_MASK_RM,
- X86ISD::SCALEF, 0),
+ X86ISD::SCALEFS, 0),
X86_INTRINSIC_DATA(avx512_mask_shuf_f32x4, INTR_TYPE_3OP_IMM8_MASK,
X86ISD::SHUF128, 0),
X86_INTRINSIC_DATA(avx512_mask_shuf_f32x4_256, INTR_TYPE_3OP_IMM8_MASK,
@@ -1616,30 +1462,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_ucmp_w_128, CMP_MASK_CC, X86ISD::CMPMU, 0),
X86_INTRINSIC_DATA(avx512_mask_ucmp_w_256, CMP_MASK_CC, X86ISD::CMPMU, 0),
X86_INTRINSIC_DATA(avx512_mask_ucmp_w_512, CMP_MASK_CC, X86ISD::CMPMU, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_pd_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_pd_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_pd_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_ps_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_ps_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_ps_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_pd_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_pd_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_pd_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_ps_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_ps_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_ps_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
X86_INTRINSIC_DATA(avx512_mask_valign_d_128, INTR_TYPE_3OP_IMM8_MASK,
X86ISD::VALIGN, 0),
X86_INTRINSIC_DATA(avx512_mask_valign_d_256, INTR_TYPE_3OP_IMM8_MASK,
@@ -1673,6 +1495,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_vfmadd_ps_512, FMA_OP_MASK, X86ISD::FMADD,
X86ISD::FMADD_RND),
+ X86_INTRINSIC_DATA(avx512_mask_vfmadd_sd, FMA_OP_SCALAR_MASK, X86ISD::FMADD_RND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vfmadd_ss, FMA_OP_SCALAR_MASK, X86ISD::FMADD_RND, 0),
X86_INTRINSIC_DATA(avx512_mask_vfmaddsub_pd_128, FMA_OP_MASK, X86ISD::FMADDSUB, 0),
X86_INTRINSIC_DATA(avx512_mask_vfmaddsub_pd_256, FMA_OP_MASK, X86ISD::FMADDSUB, 0),
X86_INTRINSIC_DATA(avx512_mask_vfmaddsub_pd_512, FMA_OP_MASK, X86ISD::FMADDSUB,
@@ -1730,18 +1554,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPERMIV3, 0),
X86_INTRINSIC_DATA(avx512_mask_vpermi2var_q_512, VPERM_3OP_MASK,
X86ISD::VPERMIV3, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_pd_128, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_pd_256, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_pd_512, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_ps_128, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_ps_256, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_ps_512, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermi2var_qi_128, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermi2var_qi_256, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermi2var_qi_512, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
X86_INTRINSIC_DATA(avx512_mask_vpermilvar_pd_128, INTR_TYPE_2OP_MASK,
X86ISD::VPERMILPV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpermilvar_pd_256, INTR_TYPE_2OP_MASK,
@@ -1784,12 +1602,92 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPERMV3, 0),
X86_INTRINSIC_DATA(avx512_mask_vpermt2var_q_512, VPERM_3OP_MASK,
X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermt2var_qi_128, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermt2var_qi_256, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermt2var_qi_512, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52h_uq_128, FMA_OP_MASK,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52h_uq_256, FMA_OP_MASK,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52h_uq_512, FMA_OP_MASK,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_128, FMA_OP_MASK,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_256, FMA_OP_MASK,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_512, FMA_OP_MASK,
+ X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_pd_128, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_pd_256, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_pd_512, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_ps_128, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_ps_256, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_ps_512, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_128, FMA_OP_MASK3, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_256, FMA_OP_MASK3, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_512, FMA_OP_MASK3, X86ISD::FMADD,
+ X86ISD::FMADD_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_128, FMA_OP_MASK3, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_256, FMA_OP_MASK3, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_512, FMA_OP_MASK3, X86ISD::FMADD,
+ X86ISD::FMADD_RND),
+
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_sd, FMA_OP_SCALAR_MASK3, X86ISD::FMADD_RND, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ss, FMA_OP_SCALAR_MASK3, X86ISD::FMADD_RND, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_128, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_256, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_512, FMA_OP_MASK3, X86ISD::FMADDSUB,
+ X86ISD::FMADDSUB_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_128, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_256, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_512, FMA_OP_MASK3, X86ISD::FMADDSUB,
+ X86ISD::FMADDSUB_RND),
+
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_128, FMA_OP_MASK3, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_256, FMA_OP_MASK3, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_512, FMA_OP_MASK3, X86ISD::FMSUB,
+ X86ISD::FMSUB_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_128, FMA_OP_MASK3, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_256, FMA_OP_MASK3, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_512, FMA_OP_MASK3, X86ISD::FMSUB,
+ X86ISD::FMSUB_RND),
+
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_128, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_256, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_512, FMA_OP_MASK3, X86ISD::FMSUBADD,
+ X86ISD::FMSUBADD_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_128, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_256, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_512, FMA_OP_MASK3, X86ISD::FMSUBADD,
+ X86ISD::FMSUBADD_RND),
+
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_128, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_256, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_512, FMA_OP_MASK3, X86ISD::FNMSUB,
+ X86ISD::FNMSUB_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_128, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_256, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_512, FMA_OP_MASK3, X86ISD::FNMSUB,
+ X86ISD::FNMSUB_RND),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_pd_128, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_pd_256, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_pd_512, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_ps_128, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_ps_256, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_ps_512, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_sd, FIXUPIMMS_MASKZ,
+ X86ISD::VFIXUPIMMS, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_ss, FIXUPIMMS_MASKZ,
+ X86ISD::VFIXUPIMMS, 0),
X86_INTRINSIC_DATA(avx512_maskz_pternlog_d_128, TERLOG_OP_MASKZ,
X86ISD::VPTERNLOG, 0),
X86_INTRINSIC_DATA(avx512_maskz_pternlog_d_256, TERLOG_OP_MASKZ,
@@ -1811,6 +1709,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_maskz_vfmadd_ps_512, FMA_OP_MASKZ, X86ISD::FMADD,
X86ISD::FMADD_RND),
+ X86_INTRINSIC_DATA(avx512_maskz_vfmadd_sd, FMA_OP_SCALAR_MASKZ, X86ISD::FMADD_RND, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vfmadd_ss, FMA_OP_SCALAR_MASKZ, X86ISD::FMADD_RND, 0),
X86_INTRINSIC_DATA(avx512_maskz_vfmaddsub_pd_128, FMA_OP_MASKZ, X86ISD::FMADDSUB, 0),
X86_INTRINSIC_DATA(avx512_maskz_vfmaddsub_pd_256, FMA_OP_MASKZ, X86ISD::FMADDSUB, 0),
X86_INTRINSIC_DATA(avx512_maskz_vfmaddsub_pd_512, FMA_OP_MASKZ, X86ISD::FMADDSUB,
@@ -1850,41 +1750,57 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPERMV3, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpermt2var_q_512, VPERM_3OP_MASKZ,
X86ISD::VPERMV3, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastb_128, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastb_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastb_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastd_128, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastd_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastd_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastq_128, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastq_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastq_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastw_128, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastw_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastw_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpermt2var_qi_128, VPERM_3OP_MASKZ,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpermt2var_qi_256, VPERM_3OP_MASKZ,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpermt2var_qi_512, VPERM_3OP_MASKZ,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52h_uq_128, FMA_OP_MASKZ,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52h_uq_256, FMA_OP_MASKZ,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52h_uq_512, FMA_OP_MASKZ,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_128, FMA_OP_MASKZ,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_256, FMA_OP_MASKZ,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_512, FMA_OP_MASKZ,
+ X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_psad_bw_512, INTR_TYPE_2OP, X86ISD::PSADBW, 0),
- X86_INTRINSIC_DATA(avx512_psll_dq_512, INTR_TYPE_2OP_IMM8, X86ISD::VSHLDQ, 0),
- X86_INTRINSIC_DATA(avx512_psrl_dq_512, INTR_TYPE_2OP_IMM8, X86ISD::VSRLDQ, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_b_128, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_b_256, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_b_512, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_d_128, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_d_256, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_d_512, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_q_128, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_q_256, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_q_512, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_w_128, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_w_256, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_w_512, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_b_128, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_b_256, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_b_512, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_d_128, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_d_256, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_d_512, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_q_128, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_q_256, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_q_512, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_w_128, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_w_256, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_w_512, CMP_MASK, X86ISD::TESTNM, 0),
X86_INTRINSIC_DATA(avx512_rcp14_pd_128, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_pd_256, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_pd_512, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_ps_128, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_ps_256, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_ps_512, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
- X86_INTRINSIC_DATA(avx512_rcp14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::FRCP, 0),
- X86_INTRINSIC_DATA(avx512_rcp14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::FRCP, 0),
+ X86_INTRINSIC_DATA(avx512_rcp14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::FRCPS, 0),
+ X86_INTRINSIC_DATA(avx512_rcp14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::FRCPS, 0),
X86_INTRINSIC_DATA(avx512_rcp28_pd, INTR_TYPE_1OP_MASK_RM, X86ISD::RCP28, 0),
X86_INTRINSIC_DATA(avx512_rcp28_ps, INTR_TYPE_1OP_MASK_RM, X86ISD::RCP28, 0),
X86_INTRINSIC_DATA(avx512_rcp28_sd, INTR_TYPE_SCALAR_MASK_RM, X86ISD::RCP28, 0),
@@ -1895,29 +1811,30 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_rsqrt14_ps_128, INTR_TYPE_1OP_MASK, X86ISD::FRSQRT, 0),
X86_INTRINSIC_DATA(avx512_rsqrt14_ps_256, INTR_TYPE_1OP_MASK, X86ISD::FRSQRT, 0),
X86_INTRINSIC_DATA(avx512_rsqrt14_ps_512, INTR_TYPE_1OP_MASK, X86ISD::FRSQRT, 0),
- X86_INTRINSIC_DATA(avx512_rsqrt14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::FRSQRT, 0),
- X86_INTRINSIC_DATA(avx512_rsqrt14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::FRSQRT, 0),
+ X86_INTRINSIC_DATA(avx512_rsqrt14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::FRSQRTS, 0),
+ X86_INTRINSIC_DATA(avx512_rsqrt14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::FRSQRTS, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_pd, INTR_TYPE_1OP_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_ps, INTR_TYPE_1OP_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_sd, INTR_TYPE_SCALAR_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_ss, INTR_TYPE_SCALAR_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx512_vcomi_sd, COMI_RM, X86ISD::COMI, X86ISD::UCOMI),
X86_INTRINSIC_DATA(avx512_vcomi_ss, COMI_RM, X86ISD::COMI, X86ISD::UCOMI),
- X86_INTRINSIC_DATA(avx_hadd_pd_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
- X86_INTRINSIC_DATA(avx_hadd_ps_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
- X86_INTRINSIC_DATA(avx_hsub_pd_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
- X86_INTRINSIC_DATA(avx_hsub_ps_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
- X86_INTRINSIC_DATA(avx_max_pd_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
- X86_INTRINSIC_DATA(avx_max_ps_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
- X86_INTRINSIC_DATA(avx_min_pd_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
- X86_INTRINSIC_DATA(avx_min_ps_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
- X86_INTRINSIC_DATA(avx_rcp_ps_256, INTR_TYPE_1OP, X86ISD::FRCP, 0),
- X86_INTRINSIC_DATA(avx_rsqrt_ps_256, INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
- X86_INTRINSIC_DATA(avx_sqrt_pd_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
- X86_INTRINSIC_DATA(avx_sqrt_ps_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
- X86_INTRINSIC_DATA(avx_vperm2f128_pd_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
- X86_INTRINSIC_DATA(avx_vperm2f128_ps_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
- X86_INTRINSIC_DATA(avx_vperm2f128_si_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtsd2si32, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_SINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtsd2si64, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_SINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtsd2usi32, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_UINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtsd2usi64, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_UINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtss2si32, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_SINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtss2si64, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_SINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtss2usi32, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_UINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtss2usi64, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_UINT_RND, 0),
X86_INTRINSIC_DATA(fma_vfmadd_pd, INTR_TYPE_3OP, X86ISD::FMADD, 0),
X86_INTRINSIC_DATA(fma_vfmadd_pd_256, INTR_TYPE_3OP, X86ISD::FMADD, 0),
X86_INTRINSIC_DATA(fma_vfmadd_ps, INTR_TYPE_3OP, X86ISD::FMADD, 0),
@@ -1942,6 +1859,24 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(fma_vfnmsub_pd_256, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
X86_INTRINSIC_DATA(fma_vfnmsub_ps, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
X86_INTRINSIC_DATA(fma_vfnmsub_ps_256, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(sse_comieq_ss, COMI, X86ISD::COMI, ISD::SETEQ),
+ X86_INTRINSIC_DATA(sse_comige_ss, COMI, X86ISD::COMI, ISD::SETGE),
+ X86_INTRINSIC_DATA(sse_comigt_ss, COMI, X86ISD::COMI, ISD::SETGT),
+ X86_INTRINSIC_DATA(sse_comile_ss, COMI, X86ISD::COMI, ISD::SETLE),
+ X86_INTRINSIC_DATA(sse_comilt_ss, COMI, X86ISD::COMI, ISD::SETLT),
+ X86_INTRINSIC_DATA(sse_comineq_ss, COMI, X86ISD::COMI, ISD::SETNE),
+ X86_INTRINSIC_DATA(sse_max_ps, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(sse_min_ps, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(sse_movmsk_ps, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
+ X86_INTRINSIC_DATA(sse_rcp_ps, INTR_TYPE_1OP, X86ISD::FRCP, 0),
+ X86_INTRINSIC_DATA(sse_rsqrt_ps, INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
+ X86_INTRINSIC_DATA(sse_sqrt_ps, INTR_TYPE_1OP, ISD::FSQRT, 0),
+ X86_INTRINSIC_DATA(sse_ucomieq_ss, COMI, X86ISD::UCOMI, ISD::SETEQ),
+ X86_INTRINSIC_DATA(sse_ucomige_ss, COMI, X86ISD::UCOMI, ISD::SETGE),
+ X86_INTRINSIC_DATA(sse_ucomigt_ss, COMI, X86ISD::UCOMI, ISD::SETGT),
+ X86_INTRINSIC_DATA(sse_ucomile_ss, COMI, X86ISD::UCOMI, ISD::SETLE),
+ X86_INTRINSIC_DATA(sse_ucomilt_ss, COMI, X86ISD::UCOMI, ISD::SETLT),
+ X86_INTRINSIC_DATA(sse_ucomineq_ss, COMI, X86ISD::UCOMI, ISD::SETNE),
X86_INTRINSIC_DATA(sse2_comieq_sd, COMI, X86ISD::COMI, ISD::SETEQ),
X86_INTRINSIC_DATA(sse2_comige_sd, COMI, X86ISD::COMI, ISD::SETGE),
X86_INTRINSIC_DATA(sse2_comigt_sd, COMI, X86ISD::COMI, ISD::SETGT),
@@ -1950,22 +1885,21 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(sse2_comineq_sd, COMI, X86ISD::COMI, ISD::SETNE),
X86_INTRINSIC_DATA(sse2_max_pd, INTR_TYPE_2OP, X86ISD::FMAX, 0),
X86_INTRINSIC_DATA(sse2_min_pd, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(sse2_movmsk_pd, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
X86_INTRINSIC_DATA(sse2_packssdw_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(sse2_packsswb_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(sse2_packuswb_128, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
+ X86_INTRINSIC_DATA(sse2_padds_b, INTR_TYPE_2OP, X86ISD::ADDS, 0),
+ X86_INTRINSIC_DATA(sse2_padds_w, INTR_TYPE_2OP, X86ISD::ADDS, 0),
+ X86_INTRINSIC_DATA(sse2_paddus_b, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
+ X86_INTRINSIC_DATA(sse2_paddus_w, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
X86_INTRINSIC_DATA(sse2_pavg_b, INTR_TYPE_2OP, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(sse2_pavg_w, INTR_TYPE_2OP, X86ISD::AVG, 0),
- X86_INTRINSIC_DATA(sse2_pmaxs_w, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(sse2_pmaxu_b, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(sse2_pmins_w, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(sse2_pminu_b, INTR_TYPE_2OP, ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(sse2_pmovmskb_128, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
X86_INTRINSIC_DATA(sse2_pmulh_w, INTR_TYPE_2OP, ISD::MULHS, 0),
X86_INTRINSIC_DATA(sse2_pmulhu_w, INTR_TYPE_2OP, ISD::MULHU, 0),
X86_INTRINSIC_DATA(sse2_pmulu_dq, INTR_TYPE_2OP, X86ISD::PMULUDQ, 0),
X86_INTRINSIC_DATA(sse2_psad_bw, INTR_TYPE_2OP, X86ISD::PSADBW, 0),
- X86_INTRINSIC_DATA(sse2_pshuf_d, INTR_TYPE_2OP, X86ISD::PSHUFD, 0),
- X86_INTRINSIC_DATA(sse2_pshufh_w, INTR_TYPE_2OP, X86ISD::PSHUFHW, 0),
- X86_INTRINSIC_DATA(sse2_pshufl_w, INTR_TYPE_2OP, X86ISD::PSHUFLW, 0),
X86_INTRINSIC_DATA(sse2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(sse2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(sse2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
@@ -1982,6 +1916,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(sse2_psrli_d, VSHIFT, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(sse2_psrli_q, VSHIFT, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(sse2_psrli_w, VSHIFT, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(sse2_psubs_b, INTR_TYPE_2OP, X86ISD::SUBS, 0),
+ X86_INTRINSIC_DATA(sse2_psubs_w, INTR_TYPE_2OP, X86ISD::SUBS, 0),
X86_INTRINSIC_DATA(sse2_psubus_b, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(sse2_psubus_w, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(sse2_sqrt_pd, INTR_TYPE_1OP, ISD::FSQRT, 0),
@@ -1997,48 +1933,17 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(sse3_hsub_ps, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
X86_INTRINSIC_DATA(sse41_insertps, INTR_TYPE_3OP, X86ISD::INSERTPS, 0),
X86_INTRINSIC_DATA(sse41_packusdw, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
- X86_INTRINSIC_DATA(sse41_pmaxsb, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(sse41_pmaxsd, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(sse41_pmaxud, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(sse41_pmaxuw, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(sse41_pminsb, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(sse41_pminsd, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(sse41_pminud, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(sse41_pminuw, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxbd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxbq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxbw, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxdq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxwd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxwq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
X86_INTRINSIC_DATA(sse41_pmuldq, INTR_TYPE_2OP, X86ISD::PMULDQ, 0),
X86_INTRINSIC_DATA(sse4a_extrqi, INTR_TYPE_3OP, X86ISD::EXTRQI, 0),
X86_INTRINSIC_DATA(sse4a_insertqi, INTR_TYPE_4OP, X86ISD::INSERTQI, 0),
- X86_INTRINSIC_DATA(sse_comieq_ss, COMI, X86ISD::COMI, ISD::SETEQ),
- X86_INTRINSIC_DATA(sse_comige_ss, COMI, X86ISD::COMI, ISD::SETGE),
- X86_INTRINSIC_DATA(sse_comigt_ss, COMI, X86ISD::COMI, ISD::SETGT),
- X86_INTRINSIC_DATA(sse_comile_ss, COMI, X86ISD::COMI, ISD::SETLE),
- X86_INTRINSIC_DATA(sse_comilt_ss, COMI, X86ISD::COMI, ISD::SETLT),
- X86_INTRINSIC_DATA(sse_comineq_ss, COMI, X86ISD::COMI, ISD::SETNE),
- X86_INTRINSIC_DATA(sse_max_ps, INTR_TYPE_2OP, X86ISD::FMAX, 0),
- X86_INTRINSIC_DATA(sse_min_ps, INTR_TYPE_2OP, X86ISD::FMIN, 0),
- X86_INTRINSIC_DATA(sse_rcp_ps, INTR_TYPE_1OP, X86ISD::FRCP, 0),
- X86_INTRINSIC_DATA(sse_rsqrt_ps, INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
- X86_INTRINSIC_DATA(sse_sqrt_ps, INTR_TYPE_1OP, ISD::FSQRT, 0),
- X86_INTRINSIC_DATA(sse_ucomieq_ss, COMI, X86ISD::UCOMI, ISD::SETEQ),
- X86_INTRINSIC_DATA(sse_ucomige_ss, COMI, X86ISD::UCOMI, ISD::SETGE),
- X86_INTRINSIC_DATA(sse_ucomigt_ss, COMI, X86ISD::UCOMI, ISD::SETGT),
- X86_INTRINSIC_DATA(sse_ucomile_ss, COMI, X86ISD::UCOMI, ISD::SETLE),
- X86_INTRINSIC_DATA(sse_ucomilt_ss, COMI, X86ISD::UCOMI, ISD::SETLT),
- X86_INTRINSIC_DATA(sse_ucomineq_ss, COMI, X86ISD::UCOMI, ISD::SETNE),
+ X86_INTRINSIC_DATA(ssse3_pabs_b_128, INTR_TYPE_1OP, X86ISD::ABS, 0),
+ X86_INTRINSIC_DATA(ssse3_pabs_d_128, INTR_TYPE_1OP, X86ISD::ABS, 0),
+ X86_INTRINSIC_DATA(ssse3_pabs_w_128, INTR_TYPE_1OP, X86ISD::ABS, 0),
X86_INTRINSIC_DATA(ssse3_phadd_d_128, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(ssse3_phadd_w_128, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(ssse3_phsub_d_128, INTR_TYPE_2OP, X86ISD::HSUB, 0),
X86_INTRINSIC_DATA(ssse3_phsub_w_128, INTR_TYPE_2OP, X86ISD::HSUB, 0),
X86_INTRINSIC_DATA(ssse3_pshuf_b_128, INTR_TYPE_2OP, X86ISD::PSHUFB, 0),
- X86_INTRINSIC_DATA(ssse3_psign_b_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
- X86_INTRINSIC_DATA(ssse3_psign_d_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
- X86_INTRINSIC_DATA(ssse3_psign_w_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
X86_INTRINSIC_DATA(xop_vpcomb, INTR_TYPE_3OP, X86ISD::VPCOM, 0),
X86_INTRINSIC_DATA(xop_vpcomd, INTR_TYPE_3OP, X86ISD::VPCOM, 0),
X86_INTRINSIC_DATA(xop_vpcomq, INTR_TYPE_3OP, X86ISD::VPCOM, 0),
@@ -2047,6 +1952,11 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(xop_vpcomuq, INTR_TYPE_3OP, X86ISD::VPCOMU, 0),
X86_INTRINSIC_DATA(xop_vpcomuw, INTR_TYPE_3OP, X86ISD::VPCOMU, 0),
X86_INTRINSIC_DATA(xop_vpcomw, INTR_TYPE_3OP, X86ISD::VPCOM, 0),
+ X86_INTRINSIC_DATA(xop_vpermil2pd, INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
+ X86_INTRINSIC_DATA(xop_vpermil2pd_256, INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
+ X86_INTRINSIC_DATA(xop_vpermil2ps, INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
+ X86_INTRINSIC_DATA(xop_vpermil2ps_256, INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
+ X86_INTRINSIC_DATA(xop_vpperm, INTR_TYPE_3OP, X86ISD::VPPERM, 0),
X86_INTRINSIC_DATA(xop_vprotb, INTR_TYPE_2OP, X86ISD::VPROT, 0),
X86_INTRINSIC_DATA(xop_vprotbi, INTR_TYPE_2OP, X86ISD::VPROTI, 0),
X86_INTRINSIC_DATA(xop_vprotd, INTR_TYPE_2OP, X86ISD::VPROT, 0),
@@ -2069,7 +1979,7 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
* Retrieve data for Intrinsic without chain.
* Return nullptr if intrinsic is not defined in the table.
*/
-static const IntrinsicData* getIntrinsicWithoutChain(unsigned IntNo) {
+static const IntrinsicData* getIntrinsicWithoutChain(uint16_t IntNo) {
IntrinsicData IntrinsicToFind = { IntNo, INTR_NO_TYPE, 0, 0 };
const IntrinsicData *Data = std::lower_bound(std::begin(IntrinsicsWithoutChain),
std::end(IntrinsicsWithoutChain),
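
As the hunk above shows, the lookup over these tables is an ordinary binary search via std::lower_bound, so the entries must stay sorted by intrinsic ID (which tracks the alphabetical order of the generated Intrinsic enum). A standalone sketch of the same pattern, with simplified IDs and no LLVM dependencies (names here are illustrative):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>

struct IntrinsicData {
  uint16_t Id;
  uint16_t Opc0;
  bool operator<(const IntrinsicData &RHS) const { return Id < RHS.Id; }
};

// Must remain sorted by Id: std::lower_bound assumes it, and the
// verifyIntrinsicTables() check below asserts it in debug builds.
static const IntrinsicData Table[] = {{10, 100}, {20, 200}, {35, 300}};

static const IntrinsicData *lookup(uint16_t IntNo) {
  IntrinsicData Key = {IntNo, 0};
  const IntrinsicData *I =
      std::lower_bound(std::begin(Table), std::end(Table), Key);
  if (I != std::end(Table) && I->Id == IntNo)
    return I;     // exact hit: this row drives the lowering
  return nullptr; // unknown intrinsic: caller falls back to other handling
}

int main() {
  assert(std::is_sorted(std::begin(Table), std::end(Table)));
  std::printf("%u\n", (unsigned)lookup(20)->Opc0); // prints 200
  std::printf("%d\n", lookup(21) == nullptr);      // prints 1
  return 0;
}
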
@@ -2093,96 +2003,6 @@ static void verifyIntrinsicTables() {
std::end(IntrinsicsWithChain)) &&
"Intrinsic data tables should have unique entries");
}
-
-// X86 specific compare constants.
-// They must be kept in synch with avxintrin.h
-#define _X86_CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
-#define _X86_CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
-#define _X86_CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
-#define _X86_CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
-#define _X86_CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
-#define _X86_CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
-#define _X86_CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
-#define _X86_CMP_ORD_Q 0x07 /* Ordered (nonsignaling) */
-#define _X86_CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */
-#define _X86_CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unord, signaling) */
-#define _X86_CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */
-#define _X86_CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
-#define _X86_CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */
-#define _X86_CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */
-#define _X86_CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */
-#define _X86_CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */
-#define _X86_CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */
-#define _X86_CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */
-#define _X86_CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */
-#define _X86_CMP_UNORD_S 0x13 /* Unordered (signaling) */
-#define _X86_CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */
-#define _X86_CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */
-#define _X86_CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unord, non-signaling) */
-#define _X86_CMP_ORD_S 0x17 /* Ordered (signaling) */
-#define _X86_CMP_EQ_US 0x18 /* Equal (unordered, signaling) */
-#define _X86_CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unord, non-sign) */
-#define _X86_CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */
-#define _X86_CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
-#define _X86_CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */
-#define _X86_CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */
-#define _X86_CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
-#define _X86_CMP_TRUE_US 0x1f /* True (unordered, signaling) */
-
-/*
-* Get comparison modifier from _mm_comi_round_sd/ss intrinsic
-* Return tuple <isOrdered, X86 condcode>
-*/
-static std::tuple<bool,unsigned> TranslateX86ConstCondToX86CC(SDValue &imm) {
- ConstantSDNode *CImm = dyn_cast<ConstantSDNode>(imm);
- unsigned IntImm = CImm->getZExtValue();
- // On a floating point condition, the flags are set as follows:
- // ZF PF CF op
- // 0 | 0 | 0 | X > Y
- // 0 | 0 | 1 | X < Y
- // 1 | 0 | 0 | X == Y
- // 1 | 1 | 1 | unordered
- switch (IntImm) {
- default: llvm_unreachable("Invalid floating point compare value for Comi!");
- case _X86_CMP_EQ_OQ: // 0x00 - Equal (ordered, nonsignaling)
- case _X86_CMP_EQ_OS: // 0x10 - Equal (ordered, signaling)
- return std::make_tuple(true, X86::COND_E);
- case _X86_CMP_EQ_UQ: // 0x08 - Equal (unordered, non-signaling)
- case _X86_CMP_EQ_US: // 0x18 - Equal (unordered, signaling)
- return std::make_tuple(false , X86::COND_E);
- case _X86_CMP_LT_OS: // 0x01 - Less-than (ordered, signaling)
- case _X86_CMP_LT_OQ: // 0x11 - Less-than (ordered, nonsignaling)
- return std::make_tuple(true, X86::COND_B);
- case _X86_CMP_NGE_US: // 0x09 - Not-greater-than-or-equal (unordered, signaling)
- case _X86_CMP_NGE_UQ: // 0x19 - Not-greater-than-or-equal (unordered, nonsignaling)
- return std::make_tuple(false , X86::COND_B);
- case _X86_CMP_LE_OS: // 0x02 - Less-than-or-equal (ordered, signaling)
- case _X86_CMP_LE_OQ: // 0x12 - Less-than-or-equal (ordered, nonsignaling)
- return std::make_tuple(true, X86::COND_BE);
- case _X86_CMP_NGT_US: // 0x0A - Not-greater-than (unordered, signaling)
- case _X86_CMP_NGT_UQ: // 0x1A - Not-greater-than (unordered, nonsignaling)
- return std::make_tuple(false, X86::COND_BE);
- case _X86_CMP_GT_OS: // 0x0E - Greater-than (ordered, signaling)
- case _X86_CMP_GT_OQ: // 0x1E - Greater-than (ordered, nonsignaling)
- return std::make_tuple(true, X86::COND_A);
- case _X86_CMP_NLE_US: // 0x06 - Not-less-than-or-equal (unordered,signaling)
- case _X86_CMP_NLE_UQ: // 0x16 - Not-less-than-or-equal (unordered, nonsignaling)
- return std::make_tuple(false, X86::COND_A);
- case _X86_CMP_GE_OS: // 0x0D - Greater-than-or-equal (ordered, signaling)
- case _X86_CMP_GE_OQ: // 0x1D - Greater-than-or-equal (ordered, nonsignaling)
- return std::make_tuple(true, X86::COND_AE);
- case _X86_CMP_NLT_US: // 0x05 - Not-less-than (unordered, signaling)
- case _X86_CMP_NLT_UQ: // 0x15 - Not-less-than (unordered, nonsignaling)
- return std::make_tuple(false, X86::COND_AE);
- case _X86_CMP_NEQ_OQ: // 0x0C - Not-equal (ordered, non-signaling)
- case _X86_CMP_NEQ_OS: // 0x1C - Not-equal (ordered, signaling)
- return std::make_tuple(true, X86::COND_NE);
- case _X86_CMP_NEQ_UQ: // 0x04 - Not-equal (unordered, nonsignaling)
- case _X86_CMP_NEQ_US: // 0x14 - Not-equal (unordered, signaling)
- return std::make_tuple(false, X86::COND_NE);
- }
-}
-
} // End llvm namespace
#endif
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index e1ca558f0f2c9..906e3427b2ff8 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -20,6 +20,7 @@
#include "Utils/X86ShuffleDecode.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineOperand.h"
@@ -35,9 +36,15 @@
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+
using namespace llvm;
namespace {
@@ -72,47 +79,33 @@ private:
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
const MCSubtargetInfo &STI);
-namespace llvm {
- X86AsmPrinter::StackMapShadowTracker::StackMapShadowTracker(TargetMachine &TM)
- : TM(TM), InShadow(false), RequiredShadowSize(0), CurrentShadowSize(0) {}
-
- X86AsmPrinter::StackMapShadowTracker::~StackMapShadowTracker() {}
-
- void
- X86AsmPrinter::StackMapShadowTracker::startFunction(MachineFunction &F) {
- MF = &F;
- CodeEmitter.reset(TM.getTarget().createMCCodeEmitter(
- *MF->getSubtarget().getInstrInfo(),
- *MF->getSubtarget().getRegisterInfo(), MF->getContext()));
- }
-
- void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
- const MCSubtargetInfo &STI) {
- if (InShadow) {
- SmallString<256> Code;
- SmallVector<MCFixup, 4> Fixups;
- raw_svector_ostream VecOS(Code);
- CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI);
- CurrentShadowSize += Code.size();
- if (CurrentShadowSize >= RequiredShadowSize)
- InShadow = false; // The shadow is big enough. Stop counting.
- }
+void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
+ const MCSubtargetInfo &STI,
+ MCCodeEmitter *CodeEmitter) {
+ if (InShadow) {
+ SmallString<256> Code;
+ SmallVector<MCFixup, 4> Fixups;
+ raw_svector_ostream VecOS(Code);
+ CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI);
+ CurrentShadowSize += Code.size();
+ if (CurrentShadowSize >= RequiredShadowSize)
+ InShadow = false; // The shadow is big enough. Stop counting.
}
+}
- void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
+void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
- if (InShadow && CurrentShadowSize < RequiredShadowSize) {
- InShadow = false;
- EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
- MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
- }
+ if (InShadow && CurrentShadowSize < RequiredShadowSize) {
+ InShadow = false;
+ EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
+ MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
}
+}
- void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
- OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
- SMShadowTracker.count(Inst, getSubtargetInfo());
- }
-} // end llvm namespace
+void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
+ OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
+ SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get());
+}
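+
+// Example, assuming a stackmap that requests an 8-byte shadow: count()
+// accumulates the encoded size of every instruction emitted after the
+// stackmap, and if only 5 bytes' worth follow before the next call,
+// emitShadowPadding() fills the remaining 3 bytes with nops so that
+// patching the shadow cannot clobber the call.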
X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
X86AsmPrinter &asmprinter)
@@ -140,12 +133,8 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
// Handle dllimport linkage.
Name += "__imp_";
break;
- case X86II::MO_DARWIN_STUB:
- Suffix = "$stub";
- break;
case X86II::MO_DARWIN_NONLAZY:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
Suffix = "$non_lazy_ptr";
break;
}
@@ -153,8 +142,6 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
if (!Suffix.empty())
Name += DL.getPrivateGlobalPrefix();
- unsigned PrefixLen = Name.size();
-
if (MO.isGlobal()) {
const GlobalValue *GV = MO.getGlobal();
AsmPrinter.getNameWithPrefix(Name, GV);
@@ -164,14 +151,11 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
assert(Suffix.empty());
Sym = MO.getMBB()->getSymbol();
}
- unsigned OrigLen = Name.size() - PrefixLen;
Name += Suffix;
if (!Sym)
Sym = Ctx.getOrCreateSymbol(Name);
- StringRef OrigName = StringRef(Name).substr(PrefixLen, OrigLen);
-
// If the target flags on the operand changes the name of the symbol, do that
// before we return the symbol.
switch (MO.getTargetFlags()) {
@@ -189,36 +173,6 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
}
break;
}
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
- MachineModuleInfoImpl::StubValueTy &StubSym =
- getMachOMMI().getHiddenGVStubEntry(Sym);
- if (!StubSym.getPointer()) {
- assert(MO.isGlobal() && "Extern symbol not handled yet");
- StubSym =
- MachineModuleInfoImpl::
- StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
- !MO.getGlobal()->hasInternalLinkage());
- }
- break;
- }
- case X86II::MO_DARWIN_STUB: {
- MachineModuleInfoImpl::StubValueTy &StubSym =
- getMachOMMI().getFnStubEntry(Sym);
- if (StubSym.getPointer())
- return Sym;
-
- if (MO.isGlobal()) {
- StubSym =
- MachineModuleInfoImpl::
- StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
- !MO.getGlobal()->hasInternalLinkage());
- } else {
- StubSym =
- MachineModuleInfoImpl::
- StubValueTy(Ctx.getOrCreateSymbol(OrigName), false);
- }
- break;
- }
}
return Sym;
@@ -237,7 +191,6 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
// These affect the name of the symbol, not any suffix.
case X86II::MO_DARWIN_NONLAZY:
case X86II::MO_DLLIMPORT:
- case X86II::MO_DARWIN_STUB:
break;
case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break;
@@ -265,14 +218,13 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case X86II::MO_PLT: RefKind = MCSymbolRefExpr::VK_PLT; break;
case X86II::MO_PIC_BASE_OFFSET:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
Expr = MCSymbolRefExpr::create(Sym, Ctx);
// Subtract the pic base.
Expr = MCBinaryExpr::createSub(Expr,
MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx),
Ctx);
if (MO.isJTI()) {
- assert(MAI.doesSetDirectiveSuppressesReloc());
+ assert(MAI.doesSetDirectiveSuppressReloc());
// If .set directive is supported, use it to reduce the number of
// relocations the assembler will generate for differences between
// local labels. This is only safe when the symbols are in the same
@@ -653,50 +605,81 @@ ReSimplify:
// MOV64ao8, MOV64o8a
// XCHG16ar, XCHG32ar, XCHG64ar
case X86::MOV8mr_NOREX:
- case X86::MOV8mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o32a); break;
+ case X86::MOV8mr:
case X86::MOV8rm_NOREX:
- case X86::MOV8rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao32); break;
- case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o32a); break;
- case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao32); break;
- case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;
- case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;
-
- case X86::ADC8ri: SimplifyShortImmForm(OutMI, X86::ADC8i8); break;
- case X86::ADC16ri: SimplifyShortImmForm(OutMI, X86::ADC16i16); break;
- case X86::ADC32ri: SimplifyShortImmForm(OutMI, X86::ADC32i32); break;
- case X86::ADC64ri32: SimplifyShortImmForm(OutMI, X86::ADC64i32); break;
- case X86::ADD8ri: SimplifyShortImmForm(OutMI, X86::ADD8i8); break;
- case X86::ADD16ri: SimplifyShortImmForm(OutMI, X86::ADD16i16); break;
- case X86::ADD32ri: SimplifyShortImmForm(OutMI, X86::ADD32i32); break;
- case X86::ADD64ri32: SimplifyShortImmForm(OutMI, X86::ADD64i32); break;
- case X86::AND8ri: SimplifyShortImmForm(OutMI, X86::AND8i8); break;
- case X86::AND16ri: SimplifyShortImmForm(OutMI, X86::AND16i16); break;
- case X86::AND32ri: SimplifyShortImmForm(OutMI, X86::AND32i32); break;
- case X86::AND64ri32: SimplifyShortImmForm(OutMI, X86::AND64i32); break;
- case X86::CMP8ri: SimplifyShortImmForm(OutMI, X86::CMP8i8); break;
- case X86::CMP16ri: SimplifyShortImmForm(OutMI, X86::CMP16i16); break;
- case X86::CMP32ri: SimplifyShortImmForm(OutMI, X86::CMP32i32); break;
- case X86::CMP64ri32: SimplifyShortImmForm(OutMI, X86::CMP64i32); break;
- case X86::OR8ri: SimplifyShortImmForm(OutMI, X86::OR8i8); break;
- case X86::OR16ri: SimplifyShortImmForm(OutMI, X86::OR16i16); break;
- case X86::OR32ri: SimplifyShortImmForm(OutMI, X86::OR32i32); break;
- case X86::OR64ri32: SimplifyShortImmForm(OutMI, X86::OR64i32); break;
- case X86::SBB8ri: SimplifyShortImmForm(OutMI, X86::SBB8i8); break;
- case X86::SBB16ri: SimplifyShortImmForm(OutMI, X86::SBB16i16); break;
- case X86::SBB32ri: SimplifyShortImmForm(OutMI, X86::SBB32i32); break;
- case X86::SBB64ri32: SimplifyShortImmForm(OutMI, X86::SBB64i32); break;
- case X86::SUB8ri: SimplifyShortImmForm(OutMI, X86::SUB8i8); break;
- case X86::SUB16ri: SimplifyShortImmForm(OutMI, X86::SUB16i16); break;
- case X86::SUB32ri: SimplifyShortImmForm(OutMI, X86::SUB32i32); break;
- case X86::SUB64ri32: SimplifyShortImmForm(OutMI, X86::SUB64i32); break;
- case X86::TEST8ri: SimplifyShortImmForm(OutMI, X86::TEST8i8); break;
- case X86::TEST16ri: SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
- case X86::TEST32ri: SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
- case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
- case X86::XOR8ri: SimplifyShortImmForm(OutMI, X86::XOR8i8); break;
- case X86::XOR16ri: SimplifyShortImmForm(OutMI, X86::XOR16i16); break;
- case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
- case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break;
+ case X86::MOV8rm:
+ case X86::MOV16mr:
+ case X86::MOV16rm:
+ case X86::MOV32mr:
+ case X86::MOV32rm: {
+ unsigned NewOpc;
+ switch (OutMI.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::MOV8mr_NOREX:
+ case X86::MOV8mr: NewOpc = X86::MOV8o32a; break;
+ case X86::MOV8rm_NOREX:
+ case X86::MOV8rm: NewOpc = X86::MOV8ao32; break;
+ case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
+ case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
+ case X86::MOV32mr: NewOpc = X86::MOV32o32a; break;
+ case X86::MOV32rm: NewOpc = X86::MOV32ao32; break;
+ }
+ SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
+ break;
+ }
+
+ case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
+ case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
+ case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
+ case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
+ case X86::OR8ri: case X86::OR16ri: case X86::OR32ri: case X86::OR64ri32:
+ case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
+ case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
+ case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
+ case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
+ unsigned NewOpc;
+ switch (OutMI.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::ADC8ri: NewOpc = X86::ADC8i8; break;
+ case X86::ADC16ri: NewOpc = X86::ADC16i16; break;
+ case X86::ADC32ri: NewOpc = X86::ADC32i32; break;
+ case X86::ADC64ri32: NewOpc = X86::ADC64i32; break;
+ case X86::ADD8ri: NewOpc = X86::ADD8i8; break;
+ case X86::ADD16ri: NewOpc = X86::ADD16i16; break;
+ case X86::ADD32ri: NewOpc = X86::ADD32i32; break;
+ case X86::ADD64ri32: NewOpc = X86::ADD64i32; break;
+ case X86::AND8ri: NewOpc = X86::AND8i8; break;
+ case X86::AND16ri: NewOpc = X86::AND16i16; break;
+ case X86::AND32ri: NewOpc = X86::AND32i32; break;
+ case X86::AND64ri32: NewOpc = X86::AND64i32; break;
+ case X86::CMP8ri: NewOpc = X86::CMP8i8; break;
+ case X86::CMP16ri: NewOpc = X86::CMP16i16; break;
+ case X86::CMP32ri: NewOpc = X86::CMP32i32; break;
+ case X86::CMP64ri32: NewOpc = X86::CMP64i32; break;
+ case X86::OR8ri: NewOpc = X86::OR8i8; break;
+ case X86::OR16ri: NewOpc = X86::OR16i16; break;
+ case X86::OR32ri: NewOpc = X86::OR32i32; break;
+ case X86::OR64ri32: NewOpc = X86::OR64i32; break;
+ case X86::SBB8ri: NewOpc = X86::SBB8i8; break;
+ case X86::SBB16ri: NewOpc = X86::SBB16i16; break;
+ case X86::SBB32ri: NewOpc = X86::SBB32i32; break;
+ case X86::SBB64ri32: NewOpc = X86::SBB64i32; break;
+ case X86::SUB8ri: NewOpc = X86::SUB8i8; break;
+ case X86::SUB16ri: NewOpc = X86::SUB16i16; break;
+ case X86::SUB32ri: NewOpc = X86::SUB32i32; break;
+ case X86::SUB64ri32: NewOpc = X86::SUB64i32; break;
+ case X86::TEST8ri: NewOpc = X86::TEST8i8; break;
+ case X86::TEST16ri: NewOpc = X86::TEST16i16; break;
+ case X86::TEST32ri: NewOpc = X86::TEST32i32; break;
+ case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
+ case X86::XOR8ri: NewOpc = X86::XOR8i8; break;
+ case X86::XOR16ri: NewOpc = X86::XOR16i16; break;
+ case X86::XOR32ri: NewOpc = X86::XOR32i32; break;
+ case X86::XOR64ri32: NewOpc = X86::XOR64i32; break;
+ }
+ SimplifyShortImmForm(OutMI, NewOpc);
+ break;
+ }
// Try to shrink some forms of movsx.
case X86::MOVSX16rr8:
@@ -785,55 +768,77 @@ void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
.addExpr(tlsRef));
}
-/// \brief Emit the optimal amount of multi-byte nops on X86.
-static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, const MCSubtargetInfo &STI) {
+/// \brief Emit the largest nop instruction smaller than or equal to \p NumBytes
+/// bytes. Return the size of the nop emitted.
+static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
+ const MCSubtargetInfo &STI) {
// This works only for 64bit. For 32bit we have to do additional checking if
// the CPU supports multi-byte nops.
assert(Is64Bit && "EmitNops only supports X86-64");
- while (NumBytes) {
- unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
- Opc = IndexReg = Displacement = SegmentReg = 0;
- BaseReg = X86::RAX; ScaleVal = 1;
- switch (NumBytes) {
- case 0: llvm_unreachable("Zero nops?"); break;
- case 1: NumBytes -= 1; Opc = X86::NOOP; break;
- case 2: NumBytes -= 2; Opc = X86::XCHG16ar; break;
- case 3: NumBytes -= 3; Opc = X86::NOOPL; break;
- case 4: NumBytes -= 4; Opc = X86::NOOPL; Displacement = 8; break;
- case 5: NumBytes -= 5; Opc = X86::NOOPL; Displacement = 8;
- IndexReg = X86::RAX; break;
- case 6: NumBytes -= 6; Opc = X86::NOOPW; Displacement = 8;
- IndexReg = X86::RAX; break;
- case 7: NumBytes -= 7; Opc = X86::NOOPL; Displacement = 512; break;
- case 8: NumBytes -= 8; Opc = X86::NOOPL; Displacement = 512;
- IndexReg = X86::RAX; break;
- case 9: NumBytes -= 9; Opc = X86::NOOPW; Displacement = 512;
- IndexReg = X86::RAX; break;
- default: NumBytes -= 10; Opc = X86::NOOPW; Displacement = 512;
- IndexReg = X86::RAX; SegmentReg = X86::CS; break;
- }
- unsigned NumPrefixes = std::min(NumBytes, 5U);
- NumBytes -= NumPrefixes;
- for (unsigned i = 0; i != NumPrefixes; ++i)
- OS.EmitBytes("\x66");
+ unsigned NopSize;
+ unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
+ Opc = IndexReg = Displacement = SegmentReg = 0;
+ BaseReg = X86::RAX;
+ ScaleVal = 1;
+ switch (NumBytes) {
+ case 0: llvm_unreachable("Zero nops?"); break;
+ case 1: NopSize = 1; Opc = X86::NOOP; break;
+ case 2: NopSize = 2; Opc = X86::XCHG16ar; break;
+ case 3: NopSize = 3; Opc = X86::NOOPL; break;
+ case 4: NopSize = 4; Opc = X86::NOOPL; Displacement = 8; break;
+ case 5: NopSize = 5; Opc = X86::NOOPL; Displacement = 8;
+ IndexReg = X86::RAX; break;
+ case 6: NopSize = 6; Opc = X86::NOOPW; Displacement = 8;
+ IndexReg = X86::RAX; break;
+ case 7: NopSize = 7; Opc = X86::NOOPL; Displacement = 512; break;
+ case 8: NopSize = 8; Opc = X86::NOOPL; Displacement = 512;
+ IndexReg = X86::RAX; break;
+ case 9: NopSize = 9; Opc = X86::NOOPW; Displacement = 512;
+ IndexReg = X86::RAX; break;
+ default: NopSize = 10; Opc = X86::NOOPW; Displacement = 512;
+ IndexReg = X86::RAX; SegmentReg = X86::CS; break;
+ }
- switch (Opc) {
- default: llvm_unreachable("Unexpected opcode"); break;
- case X86::NOOP:
- OS.EmitInstruction(MCInstBuilder(Opc), STI);
- break;
- case X86::XCHG16ar:
- OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
- break;
- case X86::NOOPL:
- case X86::NOOPW:
- OS.EmitInstruction(MCInstBuilder(Opc).addReg(BaseReg)
- .addImm(ScaleVal).addReg(IndexReg)
- .addImm(Displacement).addReg(SegmentReg), STI);
- break;
- }
- } // while (NumBytes)
+ unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
+ NopSize += NumPrefixes;
+ for (unsigned i = 0; i != NumPrefixes; ++i)
+ OS.EmitBytes("\x66");
+
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ break;
+ case X86::NOOP:
+ OS.EmitInstruction(MCInstBuilder(Opc), STI);
+ break;
+ case X86::XCHG16ar:
+ OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
+ break;
+ case X86::NOOPL:
+ case X86::NOOPW:
+ OS.EmitInstruction(MCInstBuilder(Opc)
+ .addReg(BaseReg)
+ .addImm(ScaleVal)
+ .addReg(IndexReg)
+ .addImm(Displacement)
+ .addReg(SegmentReg),
+ STI);
+ break;
+ }
+ assert(NopSize <= NumBytes && "We overemitted?");
+ return NopSize;
+}
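+
+// For reference, the cases above pick the standard x86-64 multi-byte nops
+// (byte values are the usual encodings, shown for illustration only; prefix
+// ordering can vary):
+//   1: 90                              nop
+//   2: 66 90                           xchg %ax,%ax
+//   3: 0f 1f 00                        nopl (%rax)
+//   4: 0f 1f 40 08                     nopl 8(%rax)
+//   5: 0f 1f 44 00 08                  nopl 8(%rax,%rax,1)
+//   6: 66 0f 1f 44 00 08               nopw 8(%rax,%rax,1)
+//   7: 0f 1f 80 00 02 00 00            nopl 512(%rax)
+//   8: 0f 1f 84 00 00 02 00 00         nopl 512(%rax,%rax,1)
+//   9: 66 0f 1f 84 00 00 02 00 00      nopw 512(%rax,%rax,1)
+//  10: 2e 66 0f 1f 84 00 00 02 00 00   nopw %cs:512(%rax,%rax,1)
+// With the up-to-five extra 0x66 prefixes added above, one EmitNop() call
+// covers any request of up to 15 bytes.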
+
+/// \brief Emit the optimal amount of multi-byte nops on X86.
+static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
+ const MCSubtargetInfo &STI) {
+ unsigned NopsToEmit = NumBytes;
+ (void)NopsToEmit;
+ while (NumBytes) {
+ NumBytes -= EmitNop(OS, NumBytes, Is64Bit, STI);
+ assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
+ }
}
void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
@@ -891,10 +896,10 @@ void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI,
X86MCInstLower &MCIL) {
- // FAULTING_LOAD_OP <def>, <handler label>, <load opcode>, <load operands>
+ // FAULTING_LOAD_OP <def>, <MBB handler>, <load opcode>, <load operands>
unsigned LoadDefRegister = MI.getOperand(0).getReg();
- MCSymbol *HandlerLabel = MI.getOperand(1).getMCSymbol();
+ MCSymbol *HandlerLabel = MI.getOperand(1).getMBB()->getSymbol();
unsigned LoadOpcode = MI.getOperand(2).getImm();
unsigned LoadOperandsBeginIdx = 3;
@@ -915,6 +920,43 @@ void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI,
OutStreamer->EmitInstruction(LoadMI, getSubtargetInfo());
}
+void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ // PATCHABLE_OP minsize, opcode, operands
+
+ unsigned MinSize = MI.getOperand(0).getImm();
+ unsigned Opcode = MI.getOperand(1).getImm();
+
+ MCInst MCI;
+ MCI.setOpcode(Opcode);
+ for (auto &MO : make_range(MI.operands_begin() + 2, MI.operands_end()))
+ if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
+ MCI.addOperand(MaybeOperand.getValue());
+
+ SmallString<256> Code;
+ SmallVector<MCFixup, 4> Fixups;
+ raw_svector_ostream VecOS(Code);
+ CodeEmitter->encodeInstruction(MCI, VecOS, Fixups, getSubtargetInfo());
+
+ if (Code.size() < MinSize) {
+ if (MinSize == 2 && Opcode == X86::PUSH64r) {
+ // This is an optimization that lets us get away without emitting a nop in
+ // many cases.
+ //
+ // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %R9) takes two
+ // bytes too, so the check on MinSize is important.
+ MCI.setOpcode(X86::PUSH64rmr);
+ } else {
+ unsigned NopSize = EmitNop(*OutStreamer, MinSize, Subtarget->is64Bit(),
+ getSubtargetInfo());
+ assert(NopSize == MinSize && "Could not implement MinSize!");
+ (void) NopSize;
+ }
+ }
+
+ OutStreamer->EmitInstruction(MCI, getSubtargetInfo());
+}
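+
+// Encoding note (standard x86-64 encodings assumed): PUSH64r %rax uses the
+// one-byte 50+rd form (50), while the FF /6 form PUSH64rmr %rax encodes as
+// ff f0, exactly two bytes, so no nop is needed. PUSH64r %r9 already takes
+// two bytes (41 51) because of the REX prefix, which is why the rewrite is
+// only applied when the original encoding came out shorter than MinSize.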
+
// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
@@ -982,14 +1024,107 @@ void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
getSubtargetInfo());
}
+void X86AsmPrinter::recordSled(MCSymbol *Sled, const MachineInstr &MI,
+ SledKind Kind) {
+ auto Fn = MI.getParent()->getParent()->getFunction();
+ auto Attr = Fn->getFnAttribute("function-instrument");
+ bool AlwaysInstrument =
+ Attr.isStringAttribute() && Attr.getValueAsString() == "xray-always";
+ Sleds.emplace_back(
+ XRayFunctionEntry{Sled, CurrentFnSym, Kind, AlwaysInstrument, Fn});
+}
+
+void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ // We want to emit the following pattern:
+ //
+ // .Lxray_sled_N:
+ // .palign 2, ...
+ // jmp .tmpN
+ // # 9 bytes worth of noops
+ // .tmpN
+ //
+ // We need the 9 bytes because at runtime, we'd be patching over the full 11
+ // bytes with the following pattern:
+ //
+ // mov %r10, <function id, 32-bit> // 6 bytes
+ // call <relative offset, 32-bits> // 5 bytes
+ //
+ auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
+ OutStreamer->EmitLabel(CurSled);
+ OutStreamer->EmitCodeAlignment(4);
+ auto Target = OutContext.createTempSymbol();
+
+ // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
+ // an operand (an offset relative to the end of the jmp instruction).
+ // FIXME: Find another less hacky way to force the relative jump.
+ OutStreamer->EmitBytes("\xeb\x09");
+ EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
+ OutStreamer->EmitLabel(Target);
+ recordSled(CurSled, MI, SledKind::FUNCTION_ENTER);
+}
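+
+// Byte-level sketch of the sled (illustrative encodings):
+//   eb 09                jmp .tmpN            // 2 bytes, as emitted
+//   <9 bytes of nops>
+// which the runtime may later rewrite in place to
+//   41 ba XX XX XX XX    mov $func_id, %r10d  // 6 bytes
+//   e8 XX XX XX XX       call <trampoline>    // 5 bytes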
+
+void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ // Since PATCHABLE_RET takes the opcode of the return statement as an
+ // argument, we use that to emit the correct form of the RET that we want.
+ // i.e. when we see this:
+ //
+ // PATCHABLE_RET X86::RET ...
+ //
+ // We should emit the RET followed by sleds.
+ //
+ // .Lxray_sled_N:
+ // ret # or equivalent instruction
+ // # 10 bytes worth of noops
+ //
+ // This just makes sure that the alignment for the next instruction is 2.
+ auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
+ OutStreamer->EmitLabel(CurSled);
+ unsigned OpCode = MI.getOperand(0).getImm();
+ MCInst Ret;
+ Ret.setOpcode(OpCode);
+ for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
+ if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
+ Ret.addOperand(MaybeOperand.getValue());
+ OutStreamer->EmitInstruction(Ret, getSubtargetInfo());
+ EmitNops(*OutStreamer, 10, Subtarget->is64Bit(), getSubtargetInfo());
+ recordSled(CurSled, MI, SledKind::FUNCTION_EXIT);
+}
+
+void X86AsmPrinter::EmitXRayTable() {
+ if (Sleds.empty())
+ return;
+ if (Subtarget->isTargetELF()) {
+ auto *Section = OutContext.getELFSection(
+ "xray_instr_map", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_GROUP | ELF::SHF_MERGE, 0,
+ CurrentFnSym->getName());
+ auto PrevSection = OutStreamer->getCurrentSectionOnly();
+ OutStreamer->SwitchSection(Section);
+ for (const auto &Sled : Sleds) {
+ OutStreamer->EmitSymbolValue(Sled.Sled, 8);
+ OutStreamer->EmitSymbolValue(CurrentFnSym, 8);
+ auto Kind = static_cast<uint8_t>(Sled.Kind);
+ OutStreamer->EmitBytes(
+ StringRef(reinterpret_cast<const char *>(&Kind), 1));
+ OutStreamer->EmitBytes(
+ StringRef(reinterpret_cast<const char *>(&Sled.AlwaysInstrument), 1));
+ OutStreamer->EmitZeros(14);
+ }
+ OutStreamer->SwitchSection(PrevSection);
+ }
+ Sleds.clear();
+}
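+
+// Each entry written by the loop above is a fixed 32-byte record:
+//   bytes [0,8):   address of the sled label
+//   bytes [8,16):  address of the function (CurrentFnSym)
+//   byte  16:      sled kind
+//   byte  17:      always-instrument flag
+//   bytes [18,32): zero padding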
+
// Returns instruction preceding MBBI in MachineFunction.
// If MBBI is the first instruction of the first basic block, returns an
// invalid iterator.
static MachineBasicBlock::const_iterator
PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) {
const MachineBasicBlock *MBB = MBBI->getParent();
while (MBBI == MBB->begin()) {
- if (MBB == MBB->getParent()->begin())
- return nullptr;
+ if (MBB == &MBB->getParent()->front())
+ return MachineBasicBlock::const_iterator();
MBB = MBB->getPrevNode();
MBBI = MBB->end();
}
@@ -1018,7 +1153,8 @@ static const Constant *getConstantFromPool(const MachineInstr &MI,
}
static std::string getShuffleComment(const MachineOperand &DstOp,
- const MachineOperand &SrcOp,
+ const MachineOperand &SrcOp1,
+ const MachineOperand &SrcOp2,
ArrayRef<int> Mask) {
std::string Comment;
@@ -1031,40 +1167,51 @@ static std::string getShuffleComment(const MachineOperand &DstOp,
return X86ATTInstPrinter::getRegisterName(RegNum);
};
+ // TODO: Add support for specifying an AVX512 style mask register in the comment.
StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
- StringRef SrcName = SrcOp.isReg() ? GetRegisterName(SrcOp.getReg()) : "mem";
+ StringRef Src1Name =
+ SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem";
+ StringRef Src2Name =
+ SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem";
+
+ // If both sources are the same, fix the mask so all elements print in one span.
+ SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end());
+ if (Src1Name == Src2Name)
+ for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
+ if (ShuffleMask[i] >= e)
+ ShuffleMask[i] -= e;
raw_string_ostream CS(Comment);
CS << DstName << " = ";
- bool NeedComma = false;
- bool InSrc = false;
- for (int M : Mask) {
- // Wrap up any prior entry...
- if (M == SM_SentinelZero && InSrc) {
- InSrc = false;
- CS << "]";
- }
- if (NeedComma)
+ for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
+ if (i != 0)
CS << ",";
- else
- NeedComma = true;
-
- // Print this shuffle...
- if (M == SM_SentinelZero) {
+ if (ShuffleMask[i] == SM_SentinelZero) {
CS << "zero";
- } else {
- if (!InSrc) {
- InSrc = true;
- CS << SrcName << "[";
- }
- if (M == SM_SentinelUndef)
+ continue;
+ }
+
+ // Otherwise, it must come from src1 or src2. Print the span of elements
+ // that comes from this src.
+ bool isSrc1 = ShuffleMask[i] < (int)e;
+ CS << (isSrc1 ? Src1Name : Src2Name) << '[';
+
+ bool IsFirst = true;
+ while (i != e && ShuffleMask[i] != SM_SentinelZero &&
+ (ShuffleMask[i] < (int)e) == isSrc1) {
+ if (!IsFirst)
+ CS << ',';
+ else
+ IsFirst = false;
+ if (ShuffleMask[i] == SM_SentinelUndef)
CS << "u";
else
- CS << M;
+ CS << ShuffleMask[i] % (int)e;
+ ++i;
}
+ CS << ']';
+ --i; // For loop increments element #.
}
- if (InSrc)
- CS << "]";
CS.flush();
return Comment;
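+// Example (hypothetical registers): for a two-source shuffle over
+// 4-element vectors with Mask = {0, 1, 4, 5}, the loop above prints
+//   xmm0 = xmm1[0,1],xmm2[0,1]
+// since indices >= e select the second source and are printed modulo e;
+// SM_SentinelZero elements print as "zero" and SM_SentinelUndef as "u".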
@@ -1202,12 +1349,21 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case TargetOpcode::FAULTING_LOAD_OP:
return LowerFAULTING_LOAD_OP(*MI, MCInstLowering);
+ case TargetOpcode::PATCHABLE_OP:
+ return LowerPATCHABLE_OP(*MI, MCInstLowering);
+
case TargetOpcode::STACKMAP:
return LowerSTACKMAP(*MI);
case TargetOpcode::PATCHPOINT:
return LowerPATCHPOINT(*MI, MCInstLowering);
+ case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
+ return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);
+
+ case TargetOpcode::PATCHABLE_RET:
+ return LowerPATCHABLE_RET(*MI, MCInstLowering);
+
case X86::MORESTACK_RET:
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
return;
@@ -1254,7 +1410,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case X86::SEH_Epilogue: {
MachineBasicBlock::const_iterator MBBI(MI);
// Check if preceded by a call and emit nop if so.
- for (MBBI = PrevCrossBBInst(MBBI); MBBI; MBBI = PrevCrossBBInst(MBBI)) {
+ for (MBBI = PrevCrossBBInst(MBBI);
+ MBBI != MachineBasicBlock::const_iterator();
+ MBBI = PrevCrossBBInst(MBBI)) {
// Conservatively assume that pseudo instructions don't emit code and keep
// looking for a call. We may emit an unnecessary nop in some cases.
if (!MBBI->isPseudo()) {
@@ -1313,14 +1471,38 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
SmallVector<int, 16> Mask;
DecodePSHUFBMask(C, Mask);
if (!Mask.empty())
- OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, Mask));
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, SrcOp, Mask));
}
break;
}
- case X86::VPERMILPSrm:
+
case X86::VPERMILPDrm:
+ case X86::VPERMILPDYrm:
+ case X86::VPERMILPDZ128rm:
+ case X86::VPERMILPDZ256rm:
+ case X86::VPERMILPDZrm: {
+ if (!OutStreamer->isVerboseAsm())
+ break;
+ assert(MI->getNumOperands() > 5 &&
+ "We should always have at least 5 operands!");
+ const MachineOperand &DstOp = MI->getOperand(0);
+ const MachineOperand &SrcOp = MI->getOperand(1);
+ const MachineOperand &MaskOp = MI->getOperand(5);
+
+ if (auto *C = getConstantFromPool(*MI, MaskOp)) {
+ SmallVector<int, 8> Mask;
+ DecodeVPERMILPMask(C, 64, Mask);
+ if (!Mask.empty())
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, SrcOp, Mask));
+ }
+ break;
+ }
+
+ case X86::VPERMILPSrm:
case X86::VPERMILPSYrm:
- case X86::VPERMILPDYrm: {
+ case X86::VPERMILPSZ128rm:
+ case X86::VPERMILPSZ256rm:
+ case X86::VPERMILPSZrm: {
if (!OutStreamer->isVerboseAsm())
break;
assert(MI->getNumOperands() > 5 &&
@@ -1329,18 +1511,63 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MachineOperand &SrcOp = MI->getOperand(1);
const MachineOperand &MaskOp = MI->getOperand(5);
+ if (auto *C = getConstantFromPool(*MI, MaskOp)) {
+ SmallVector<int, 16> Mask;
+ DecodeVPERMILPMask(C, 32, Mask);
+ if (!Mask.empty())
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, SrcOp, Mask));
+ }
+ break;
+ }
+
+ case X86::VPERMIL2PDrm:
+ case X86::VPERMIL2PSrm:
+ case X86::VPERMIL2PDrmY:
+ case X86::VPERMIL2PSrmY: {
+ if (!OutStreamer->isVerboseAsm())
+ break;
+ assert(MI->getNumOperands() > 7 &&
+ "We should always have at least 7 operands!");
+ const MachineOperand &DstOp = MI->getOperand(0);
+ const MachineOperand &SrcOp1 = MI->getOperand(1);
+ const MachineOperand &SrcOp2 = MI->getOperand(2);
+ const MachineOperand &MaskOp = MI->getOperand(6);
+ const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1);
+
+ if (!CtrlOp.isImm())
+ break;
+
unsigned ElSize;
switch (MI->getOpcode()) {
default: llvm_unreachable("Invalid opcode");
- case X86::VPERMILPSrm: case X86::VPERMILPSYrm: ElSize = 32; break;
- case X86::VPERMILPDrm: case X86::VPERMILPDYrm: ElSize = 64; break;
+ case X86::VPERMIL2PSrm: case X86::VPERMIL2PSrmY: ElSize = 32; break;
+ case X86::VPERMIL2PDrm: case X86::VPERMIL2PDrmY: ElSize = 64; break;
+ }
+
+ if (auto *C = getConstantFromPool(*MI, MaskOp)) {
+ SmallVector<int, 16> Mask;
+ DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Mask);
+ if (!Mask.empty())
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp1, SrcOp2, Mask));
}
+ break;
+ }
+
+ case X86::VPPERMrrm: {
+ if (!OutStreamer->isVerboseAsm())
+ break;
+ assert(MI->getNumOperands() > 6 &&
+ "We should always have at least 6 operands!");
+ const MachineOperand &DstOp = MI->getOperand(0);
+ const MachineOperand &SrcOp1 = MI->getOperand(1);
+ const MachineOperand &SrcOp2 = MI->getOperand(2);
+ const MachineOperand &MaskOp = MI->getOperand(6);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
SmallVector<int, 16> Mask;
- DecodeVPERMILPMask(C, ElSize, Mask);
+ DecodeVPPERMMask(C, Mask);
if (!Mask.empty())
- OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, Mask));
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp1, SrcOp2, Mask));
}
break;
}
@@ -1413,7 +1640,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
CS << CI->getZExtValue();
} else {
// print multi-word constant as (w0,w1)
- auto Val = CI->getValue();
+ const auto &Val = CI->getValue();
CS << "(";
for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
if (i > 0)
@@ -1446,7 +1673,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
// is at the end of the shadow.
if (MI->isCall()) {
// Count then size of the call towards the shadow
- SMShadowTracker.count(TmpInst, getSubtargetInfo());
+ SMShadowTracker.count(TmpInst, getSubtargetInfo(), CodeEmitter.get());
// Then flush the shadow so that we fill with nops before the call, not
// after it.
SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
diff --git a/lib/Target/X86/X86MachineFunctionInfo.h b/lib/Target/X86/X86MachineFunctionInfo.h
index 00515dde55682..d517d82537a78 100644
--- a/lib/Target/X86/X86MachineFunctionInfo.h
+++ b/lib/Target/X86/X86MachineFunctionInfo.h
@@ -17,7 +17,6 @@
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineValueType.h"
-#include <vector>
namespace llvm {
@@ -96,6 +95,12 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
/// copies.
bool IsSplitCSR = false;
+ /// True if this function uses the red zone.
+ bool UsesRedZone = false;
+
+ /// True if this function has WIN_ALLOCA instructions.
+ bool HasWinAlloca = false;
+
private:
/// ForwardedMustTailRegParms - A list of virtual and physical registers
/// that must be forwarded to every musttail call.
@@ -167,6 +172,12 @@ public:
bool isSplitCSR() const { return IsSplitCSR; }
void setIsSplitCSR(bool s) { IsSplitCSR = s; }
+
+ bool getUsesRedZone() const { return UsesRedZone; }
+ void setUsesRedZone(bool V) { UsesRedZone = V; }
+
+ bool hasWinAlloca() const { return HasWinAlloca; }
+ void setHasWinAlloca(bool v) { HasWinAlloca = v; }
};
} // End llvm namespace
diff --git a/lib/Target/X86/X86OptimizeLEAs.cpp b/lib/Target/X86/X86OptimizeLEAs.cpp
index 45cc0aef1d934..4da0fddda3953 100644
--- a/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This file defines the pass that performs some optimizations with LEA
-// instructions in order to improve code size.
+// instructions in order to improve performance and code size.
// Currently, it does two things:
// 1) If there are two LEA instructions calculating addresses which only differ
// by displacement inside a basic block, one of them is removed.
@@ -24,6 +24,7 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
@@ -35,13 +36,186 @@ using namespace llvm;
#define DEBUG_TYPE "x86-optimize-LEAs"
-static cl::opt<bool> EnableX86LEAOpt("enable-x86-lea-opt", cl::Hidden,
- cl::desc("X86: Enable LEA optimizations."),
- cl::init(false));
+static cl::opt<bool>
+ DisableX86LEAOpt("disable-x86-lea-opt", cl::Hidden,
+ cl::desc("X86: Disable LEA optimizations."),
+ cl::init(false));
STATISTIC(NumSubstLEAs, "Number of LEA instruction substitutions");
STATISTIC(NumRedundantLEAs, "Number of redundant LEA instructions removed");
+class MemOpKey;
+
+/// \brief Returns a hash table key based on memory operands of \p MI. The
+/// number of the first memory operand of \p MI is specified through \p N.
+static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N);
+
+/// \brief Returns true if two machine operands are identical and they are not
+/// physical registers.
+static inline bool isIdenticalOp(const MachineOperand &MO1,
+ const MachineOperand &MO2);
+
+/// \brief Returns true if two address displacement operands are of the same
+/// type and use the same symbol/index/address regardless of the offset.
+static bool isSimilarDispOp(const MachineOperand &MO1,
+ const MachineOperand &MO2);
+
+/// \brief Returns true if the instruction is LEA.
+static inline bool isLEA(const MachineInstr &MI);
+
+/// A key based on instruction's memory operands.
+class MemOpKey {
+public:
+ MemOpKey(const MachineOperand *Base, const MachineOperand *Scale,
+ const MachineOperand *Index, const MachineOperand *Segment,
+ const MachineOperand *Disp)
+ : Disp(Disp) {
+ Operands[0] = Base;
+ Operands[1] = Scale;
+ Operands[2] = Index;
+ Operands[3] = Segment;
+ }
+
+ bool operator==(const MemOpKey &Other) const {
+ // Addresses' bases, scales, indices and segments must be identical.
+ for (int i = 0; i < 4; ++i)
+ if (!isIdenticalOp(*Operands[i], *Other.Operands[i]))
+ return false;
+
+ // Addresses' displacements don't have to be exactly the same. It only
+ // matters that they use the same symbol/index/address. Immediates' or
+ // offsets' differences will be taken care of during instruction
+ // substitution.
+ return isSimilarDispOp(*Disp, *Other.Disp);
+ }
+
+ // Address' base, scale, index and segment operands.
+ const MachineOperand *Operands[4];
+
+ // Address' displacement operand.
+ const MachineOperand *Disp;
+};
+
+/// Provide DenseMapInfo for MemOpKey.
+namespace llvm {
+template <> struct DenseMapInfo<MemOpKey> {
+ typedef DenseMapInfo<const MachineOperand *> PtrInfo;
+
+ static inline MemOpKey getEmptyKey() {
+ return MemOpKey(PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
+ PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
+ PtrInfo::getEmptyKey());
+ }
+
+ static inline MemOpKey getTombstoneKey() {
+ return MemOpKey(PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
+ PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
+ PtrInfo::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const MemOpKey &Val) {
+ // Checking any field of MemOpKey is enough to determine if the key is
+ // empty or tombstone.
+ assert(Val.Disp != PtrInfo::getEmptyKey() && "Cannot hash the empty key");
+ assert(Val.Disp != PtrInfo::getTombstoneKey() &&
+ "Cannot hash the tombstone key");
+
+ hash_code Hash = hash_combine(*Val.Operands[0], *Val.Operands[1],
+ *Val.Operands[2], *Val.Operands[3]);
+
+ // If the address displacement is an immediate, it should not affect the
+ // hash so that memory operands which differ only by immediate displacement
+ // would have the same hash. If the address displacement is something else,
+ // we should reflect symbol/index/address in the hash.
+ switch (Val.Disp->getType()) {
+ case MachineOperand::MO_Immediate:
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_JumpTableIndex:
+ Hash = hash_combine(Hash, Val.Disp->getIndex());
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ Hash = hash_combine(Hash, Val.Disp->getSymbolName());
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ Hash = hash_combine(Hash, Val.Disp->getGlobal());
+ break;
+ case MachineOperand::MO_BlockAddress:
+ Hash = hash_combine(Hash, Val.Disp->getBlockAddress());
+ break;
+ case MachineOperand::MO_MCSymbol:
+ Hash = hash_combine(Hash, Val.Disp->getMCSymbol());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ Hash = hash_combine(Hash, Val.Disp->getMBB());
+ break;
+ default:
+ llvm_unreachable("Invalid address displacement operand");
+ }
+
+ return (unsigned)Hash;
+ }
+
+ static bool isEqual(const MemOpKey &LHS, const MemOpKey &RHS) {
+ // Checking any field of MemOpKey is enough to determine if the key is
+ // empty or tombstone.
+ if (RHS.Disp == PtrInfo::getEmptyKey())
+ return LHS.Disp == PtrInfo::getEmptyKey();
+ if (RHS.Disp == PtrInfo::getTombstoneKey())
+ return LHS.Disp == PtrInfo::getTombstoneKey();
+ return LHS == RHS;
+ }
+};
+}
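+
+// Usage sketch (hypothetical virtual registers): under this DenseMapInfo,
+//   lea 8(%base,%index,4), %dst1
+//   lea 24(%base,%index,4), %dst2
+// (with %base/%index virtual, since isIdenticalOp() rejects physical
+// registers) produce equal MemOpKeys: base, scale, index and segment are
+// identical, and both displacements are immediates, which
+// isSimilarDispOp() accepts and getHashValue() leaves out of the hash.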
+
+static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
+ assert((isLEA(MI) || MI.mayLoadOrStore()) &&
+ "The instruction must be a LEA, a load or a store");
+ return MemOpKey(&MI.getOperand(N + X86::AddrBaseReg),
+ &MI.getOperand(N + X86::AddrScaleAmt),
+ &MI.getOperand(N + X86::AddrIndexReg),
+ &MI.getOperand(N + X86::AddrSegmentReg),
+ &MI.getOperand(N + X86::AddrDisp));
+}
+
+static inline bool isIdenticalOp(const MachineOperand &MO1,
+ const MachineOperand &MO2) {
+ return MO1.isIdenticalTo(MO2) &&
+ (!MO1.isReg() ||
+ !TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
+}
+
+#ifndef NDEBUG
+static bool isValidDispOp(const MachineOperand &MO) {
+ return MO.isImm() || MO.isCPI() || MO.isJTI() || MO.isSymbol() ||
+ MO.isGlobal() || MO.isBlockAddress() || MO.isMCSymbol() || MO.isMBB();
+}
+#endif
+
+static bool isSimilarDispOp(const MachineOperand &MO1,
+ const MachineOperand &MO2) {
+ assert(isValidDispOp(MO1) && isValidDispOp(MO2) &&
+ "Address displacement operand is not valid");
+ return (MO1.isImm() && MO2.isImm()) ||
+ (MO1.isCPI() && MO2.isCPI() && MO1.getIndex() == MO2.getIndex()) ||
+ (MO1.isJTI() && MO2.isJTI() && MO1.getIndex() == MO2.getIndex()) ||
+ (MO1.isSymbol() && MO2.isSymbol() &&
+ MO1.getSymbolName() == MO2.getSymbolName()) ||
+ (MO1.isGlobal() && MO2.isGlobal() &&
+ MO1.getGlobal() == MO2.getGlobal()) ||
+ (MO1.isBlockAddress() && MO2.isBlockAddress() &&
+ MO1.getBlockAddress() == MO2.getBlockAddress()) ||
+ (MO1.isMCSymbol() && MO2.isMCSymbol() &&
+ MO1.getMCSymbol() == MO2.getMCSymbol()) ||
+ (MO1.isMBB() && MO2.isMBB() && MO1.getMBB() == MO2.getMBB());
+}
+
+static inline bool isLEA(const MachineInstr &MI) {
+ unsigned Opcode = MI.getOpcode();
+ return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
+ Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
+}
+
namespace {
class OptimizeLEAPass : public MachineFunctionPass {
public:
@@ -55,51 +229,43 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
private:
+ typedef DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>> MemOpMap;
+
/// \brief Returns a distance between two instructions inside one basic block.
  /// A negative result means that the instructions occur in reverse order.
int calcInstrDist(const MachineInstr &First, const MachineInstr &Last);
/// \brief Choose the best \p LEA instruction from the \p List to replace
/// address calculation in \p MI instruction. Return the address displacement
- /// and the distance between \p MI and the choosen \p LEA in \p AddrDispShift
- /// and \p Dist.
+ /// and the distance between \p MI and the chosen \p BestLEA in
+ /// \p AddrDispShift and \p Dist.
bool chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
- const MachineInstr &MI, MachineInstr *&LEA,
+ const MachineInstr &MI, MachineInstr *&BestLEA,
int64_t &AddrDispShift, int &Dist);
- /// \brief Returns true if two machine operand are identical and they are not
- /// physical registers.
- bool isIdenticalOp(const MachineOperand &MO1, const MachineOperand &MO2);
-
- /// \brief Returns true if the instruction is LEA.
- bool isLEA(const MachineInstr &MI);
+ /// \brief Returns the difference between addresses' displacements of \p MI1
+ /// and \p MI2. The numbers of the first memory operands for the instructions
+ /// are specified through \p N1 and \p N2.
+ int64_t getAddrDispShift(const MachineInstr &MI1, unsigned N1,
+ const MachineInstr &MI2, unsigned N2) const;
/// \brief Returns true if the \p Last LEA instruction can be replaced by the
/// \p First. The difference between displacements of the addresses calculated
/// by these LEAs is returned in \p AddrDispShift. It'll be used for proper
/// replacement of the \p Last LEA's uses with the \p First's def register.
bool isReplaceable(const MachineInstr &First, const MachineInstr &Last,
- int64_t &AddrDispShift);
-
- /// \brief Returns true if two instructions have memory operands that only
- /// differ by displacement. The numbers of the first memory operands for both
- /// instructions are specified through \p N1 and \p N2. The address
- /// displacement is returned through AddrDispShift.
- bool isSimilarMemOp(const MachineInstr &MI1, unsigned N1,
- const MachineInstr &MI2, unsigned N2,
- int64_t &AddrDispShift);
+ int64_t &AddrDispShift) const;
/// \brief Find all LEA instructions in the basic block. Also, assign position
/// numbers to all instructions in the basic block to speed up calculation of
/// distance between them.
- void findLEAs(const MachineBasicBlock &MBB,
- SmallVectorImpl<MachineInstr *> &List);
+ void findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs);
/// \brief Removes redundant address calculations.
- bool removeRedundantAddrCalc(const SmallVectorImpl<MachineInstr *> &List);
+ bool removeRedundantAddrCalc(MemOpMap &LEAs);
/// \brief Removes LEAs which calculate similar addresses.
- bool removeRedundantLEAs(SmallVectorImpl<MachineInstr *> &List);
+ bool removeRedundantLEAs(MemOpMap &LEAs);
DenseMap<const MachineInstr *, unsigned> InstrPos;
@@ -137,22 +303,20 @@ int OptimizeLEAPass::calcInstrDist(const MachineInstr &First,
// 4) The LEA should be as close to MI as possible, and prior to it if
// possible.
bool OptimizeLEAPass::chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
- const MachineInstr &MI, MachineInstr *&LEA,
+ const MachineInstr &MI,
+ MachineInstr *&BestLEA,
int64_t &AddrDispShift, int &Dist) {
const MachineFunction *MF = MI.getParent()->getParent();
const MCInstrDesc &Desc = MI.getDesc();
- int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags, MI.getOpcode()) +
+ int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
X86II::getOperandBias(Desc);
- LEA = nullptr;
+ BestLEA = nullptr;
// Loop over all LEA instructions.
for (auto DefMI : List) {
- int64_t AddrDispShiftTemp = 0;
-
- // Compare instructions memory operands.
- if (!isSimilarMemOp(MI, MemOpNo, *DefMI, 1, AddrDispShiftTemp))
- continue;
+ // Get new address displacement.
+ int64_t AddrDispShiftTemp = getAddrDispShift(MI, MemOpNo, *DefMI, 1);
// Make sure address displacement fits 4 bytes.
if (!isInt<32>(AddrDispShiftTemp))
@@ -174,14 +338,14 @@ bool OptimizeLEAPass::chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
int DistTemp = calcInstrDist(*DefMI, MI);
assert(DistTemp != 0 &&
"The distance between two different instructions cannot be zero");
- if (DistTemp > 0 || LEA == nullptr) {
+ if (DistTemp > 0 || BestLEA == nullptr) {
      // Do not update the returned LEA if the current one provides a displacement
// which fits in 1 byte, while the new candidate does not.
- if (LEA != nullptr && !isInt<8>(AddrDispShiftTemp) &&
+ if (BestLEA != nullptr && !isInt<8>(AddrDispShiftTemp) &&
isInt<8>(AddrDispShift))
continue;
- LEA = DefMI;
+ BestLEA = DefMI;
AddrDispShift = AddrDispShiftTemp;
Dist = DistTemp;
}
@@ -191,20 +355,28 @@ bool OptimizeLEAPass::chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
break;
}
- return LEA != nullptr;
-}
-
-bool OptimizeLEAPass::isIdenticalOp(const MachineOperand &MO1,
- const MachineOperand &MO2) {
- return MO1.isIdenticalTo(MO2) &&
- (!MO1.isReg() ||
- !TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
+ return BestLEA != nullptr;
}
-bool OptimizeLEAPass::isLEA(const MachineInstr &MI) {
- unsigned Opcode = MI.getOpcode();
- return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
- Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
+// Get the difference between the addresses' displacements of the two
+// instructions \p MI1 and \p MI2. The numbers of the first memory operands are
+// passed through \p N1 and \p N2.
+int64_t OptimizeLEAPass::getAddrDispShift(const MachineInstr &MI1, unsigned N1,
+ const MachineInstr &MI2,
+ unsigned N2) const {
+ const MachineOperand &Op1 = MI1.getOperand(N1 + X86::AddrDisp);
+ const MachineOperand &Op2 = MI2.getOperand(N2 + X86::AddrDisp);
+
+ assert(isSimilarDispOp(Op1, Op2) &&
+ "Address displacement operands are not compatible");
+
+ // After the assert above we can be sure that both operands are of the same
+ // valid type and use the same symbol/index/address, thus displacement shift
+ // calculation is rather simple.
+ if (Op1.isJTI())
+ return 0;
+ return Op1.isImm() ? Op1.getImm() - Op2.getImm()
+ : Op1.getOffset() - Op2.getOffset();
}
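+
+// Example (hypothetical operands): for MI1 = lea 24(%base), %d1 and
+// MI2 = lea 8(%base), %d2, both displacements are immediates, so the
+// function returns 24 - 8 = 16; callers fold that shift into the users of
+// the LEA being replaced.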
// Check that the Last LEA can be replaced by the First LEA. To be so,
@@ -215,13 +387,12 @@ bool OptimizeLEAPass::isLEA(const MachineInstr &MI) {
// register is used only as address base.
bool OptimizeLEAPass::isReplaceable(const MachineInstr &First,
const MachineInstr &Last,
- int64_t &AddrDispShift) {
+ int64_t &AddrDispShift) const {
assert(isLEA(First) && isLEA(Last) &&
"The function works only with LEA instructions");
- // Compare instructions' memory operands.
- if (!isSimilarMemOp(Last, 1, First, 1, AddrDispShift))
- return false;
+ // Get new address displacement.
+ AddrDispShift = getAddrDispShift(Last, 1, First, 1);
// Make sure that LEA def registers belong to the same class. There may be
// instructions (like MOV8mr_NOREX) which allow a limited set of registers to
@@ -239,7 +410,7 @@ bool OptimizeLEAPass::isReplaceable(const MachineInstr &First,
// Get the number of the first memory operand.
const MCInstrDesc &Desc = MI.getDesc();
- int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags, MI.getOpcode());
+ int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
// If the use instruction has no memory operand - the LEA is not
// replaceable.
@@ -270,36 +441,7 @@ bool OptimizeLEAPass::isReplaceable(const MachineInstr &First,
return true;
}
-// Check if MI1 and MI2 have memory operands which represent addresses that
-// differ only by displacement.
-bool OptimizeLEAPass::isSimilarMemOp(const MachineInstr &MI1, unsigned N1,
- const MachineInstr &MI2, unsigned N2,
- int64_t &AddrDispShift) {
- // Address base, scale, index and segment operands must be identical.
- static const int IdenticalOpNums[] = {X86::AddrBaseReg, X86::AddrScaleAmt,
- X86::AddrIndexReg, X86::AddrSegmentReg};
- for (auto &N : IdenticalOpNums)
- if (!isIdenticalOp(MI1.getOperand(N1 + N), MI2.getOperand(N2 + N)))
- return false;
-
- // Address displacement operands may differ by a constant.
- const MachineOperand *Op1 = &MI1.getOperand(N1 + X86::AddrDisp);
- const MachineOperand *Op2 = &MI2.getOperand(N2 + X86::AddrDisp);
- if (!isIdenticalOp(*Op1, *Op2)) {
- if (Op1->isImm() && Op2->isImm())
- AddrDispShift = Op1->getImm() - Op2->getImm();
- else if (Op1->isGlobal() && Op2->isGlobal() &&
- Op1->getGlobal() == Op2->getGlobal())
- AddrDispShift = Op1->getOffset() - Op2->getOffset();
- else
- return false;
- }
-
- return true;
-}
-
-void OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB,
- SmallVectorImpl<MachineInstr *> &List) {
+void OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs) {
unsigned Pos = 0;
for (auto &MI : MBB) {
// Assign the position number to the instruction. Note that we are going to
@@ -310,24 +452,22 @@ void OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB,
InstrPos[&MI] = Pos += 2;
if (isLEA(MI))
- List.push_back(const_cast<MachineInstr *>(&MI));
+ LEAs[getMemOpKey(MI, 1)].push_back(const_cast<MachineInstr *>(&MI));
}
}
// Try to find load and store instructions which recalculate addresses already
// calculated by some LEA and replace their memory operands with its def
// register.
-bool OptimizeLEAPass::removeRedundantAddrCalc(
- const SmallVectorImpl<MachineInstr *> &List) {
+bool OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
bool Changed = false;
- assert(List.size() > 0);
- MachineBasicBlock *MBB = List[0]->getParent();
+ assert(!LEAs.empty());
+ MachineBasicBlock *MBB = (*LEAs.begin()->second.begin())->getParent();
// Process all instructions in basic block.
for (auto I = MBB->begin(), E = MBB->end(); I != E;) {
MachineInstr &MI = *I++;
- unsigned Opcode = MI.getOpcode();
// Instruction must be load or store.
if (!MI.mayLoadOrStore())
@@ -335,7 +475,7 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(
// Get the number of the first memory operand.
const MCInstrDesc &Desc = MI.getDesc();
- int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags, Opcode);
+ int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
// If instruction has no memory operand - skip it.
if (MemOpNo < 0)
@@ -347,7 +487,8 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(
MachineInstr *DefMI;
int64_t AddrDispShift;
int Dist;
- if (!chooseBestLEA(List, MI, DefMI, AddrDispShift, Dist))
+ if (!chooseBestLEA(LEAs[getMemOpKey(MI, MemOpNo)], MI, DefMI, AddrDispShift,
+ Dist))
continue;
// If LEA occurs before current instruction, we can freely replace
@@ -362,9 +503,10 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(
InstrPos[DefMI] = InstrPos[&MI] - 1;
// Make sure the instructions' position numbers are sane.
- assert(((InstrPos[DefMI] == 1 && DefMI == MBB->begin()) ||
+ assert(((InstrPos[DefMI] == 1 &&
+ MachineBasicBlock::iterator(DefMI) == MBB->begin()) ||
InstrPos[DefMI] >
- InstrPos[std::prev(MachineBasicBlock::iterator(DefMI))]) &&
+ InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) &&
"Instruction positioning is broken");
}
@@ -393,75 +535,78 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(
}
// Try to find similar LEAs in the list and replace one with another.
-bool
-OptimizeLEAPass::removeRedundantLEAs(SmallVectorImpl<MachineInstr *> &List) {
+bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
bool Changed = false;
- // Loop over all LEA pairs.
- auto I1 = List.begin();
- while (I1 != List.end()) {
- MachineInstr &First = **I1;
- auto I2 = std::next(I1);
- while (I2 != List.end()) {
- MachineInstr &Last = **I2;
- int64_t AddrDispShift;
-
- // LEAs should be in occurence order in the list, so we can freely
- // replace later LEAs with earlier ones.
- assert(calcInstrDist(First, Last) > 0 &&
- "LEAs must be in occurence order in the list");
-
- // Check that the Last LEA instruction can be replaced by the First.
- if (!isReplaceable(First, Last, AddrDispShift)) {
- ++I2;
- continue;
- }
-
- // Loop over all uses of the Last LEA and update their operands. Note that
- // the correctness of this has already been checked in the isReplaceable
- // function.
- for (auto UI = MRI->use_begin(Last.getOperand(0).getReg()),
- UE = MRI->use_end();
- UI != UE;) {
- MachineOperand &MO = *UI++;
- MachineInstr &MI = *MO.getParent();
-
- // Get the number of the first memory operand.
- const MCInstrDesc &Desc = MI.getDesc();
- int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags, MI.getOpcode()) +
- X86II::getOperandBias(Desc);
-
- // Update address base.
- MO.setReg(First.getOperand(0).getReg());
-
- // Update address disp.
- MachineOperand *Op = &MI.getOperand(MemOpNo + X86::AddrDisp);
- if (Op->isImm())
- Op->setImm(Op->getImm() + AddrDispShift);
- else if (Op->isGlobal())
- Op->setOffset(Op->getOffset() + AddrDispShift);
- else
- llvm_unreachable("Invalid address displacement operand");
+ // Loop over all entries in the table.
+ for (auto &E : LEAs) {
+ auto &List = E.second;
+
+ // Loop over all LEA pairs.
+ auto I1 = List.begin();
+ while (I1 != List.end()) {
+ MachineInstr &First = **I1;
+ auto I2 = std::next(I1);
+ while (I2 != List.end()) {
+ MachineInstr &Last = **I2;
+ int64_t AddrDispShift;
+
+      // LEAs should be in occurrence order in the list, so we can freely
+      // replace later LEAs with earlier ones.
+      assert(calcInstrDist(First, Last) > 0 &&
+             "LEAs must be in occurrence order in the list");
+
+ // Check that the Last LEA instruction can be replaced by the First.
+ if (!isReplaceable(First, Last, AddrDispShift)) {
+ ++I2;
+ continue;
+ }
+
+ // Loop over all uses of the Last LEA and update their operands. Note
+ // that the correctness of this has already been checked in the
+ // isReplaceable function.
+ for (auto UI = MRI->use_begin(Last.getOperand(0).getReg()),
+ UE = MRI->use_end();
+ UI != UE;) {
+ MachineOperand &MO = *UI++;
+ MachineInstr &MI = *MO.getParent();
+
+ // Get the number of the first memory operand.
+ const MCInstrDesc &Desc = MI.getDesc();
+ int MemOpNo =
+ X86II::getMemoryOperandNo(Desc.TSFlags) +
+ X86II::getOperandBias(Desc);
+
+ // Update address base.
+ MO.setReg(First.getOperand(0).getReg());
+
+ // Update address disp.
+ MachineOperand &Op = MI.getOperand(MemOpNo + X86::AddrDisp);
+ if (Op.isImm())
+ Op.setImm(Op.getImm() + AddrDispShift);
+ else if (!Op.isJTI())
+ Op.setOffset(Op.getOffset() + AddrDispShift);
+ }
+
+ // Since we can possibly extend register lifetime, clear kill flags.
+ MRI->clearKillFlags(First.getOperand(0).getReg());
+
+ ++NumRedundantLEAs;
+ DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: "; Last.dump(););
+
+ // By this moment, all of the Last LEA's uses must be replaced. So we
+ // can freely remove it.
+ assert(MRI->use_empty(Last.getOperand(0).getReg()) &&
+ "The LEA's def register must have no uses");
+ Last.eraseFromParent();
+
+ // Erase removed LEA from the list.
+ I2 = List.erase(I2);
+
+ Changed = true;
}
-
- // Since we can possibly extend register lifetime, clear kill flags.
- MRI->clearKillFlags(First.getOperand(0).getReg());
-
- ++NumRedundantLEAs;
- DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: "; Last.dump(););
-
- // By this moment, all of the Last LEA's uses must be replaced. So we can
- // freely remove it.
- assert(MRI->use_empty(Last.getOperand(0).getReg()) &&
- "The LEA's def register must have no uses");
- Last.eraseFromParent();
-
- // Erase removed LEA from the list.
- I2 = List.erase(I2);
-
- Changed = true;
+ ++I1;
}
- ++I1;
}
return Changed;
@@ -470,8 +615,7 @@ OptimizeLEAPass::removeRedundantLEAs(SmallVectorImpl<MachineInstr *> &List) {
bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
- // Perform this optimization only if we care about code size.
- if (!EnableX86LEAOpt || !MF.getFunction()->optForSize())
+ if (DisableX86LEAOpt || skipFunction(*MF.getFunction()))
return false;
MRI = &MF.getRegInfo();
@@ -480,7 +624,7 @@ bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
// Process all basic blocks.
for (auto &MBB : MF) {
- SmallVector<MachineInstr *, 16> LEAs;
+ MemOpMap LEAs;
InstrPos.clear();
// Find all LEA instructions in basic block.
@@ -490,13 +634,13 @@ bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
if (LEAs.empty())
continue;
- // Remove redundant LEA instructions. The optimization may have a negative
- // effect on performance, so do it only for -Oz.
- if (MF.getFunction()->optForMinSize())
- Changed |= removeRedundantLEAs(LEAs);
+ // Remove redundant LEA instructions.
+ Changed |= removeRedundantLEAs(LEAs);
- // Remove redundant address calculations.
- Changed |= removeRedundantAddrCalc(LEAs);
+ // Remove redundant address calculations. Do it only for -Os/-Oz since only
+ // a code size gain is expected from this part of the pass.
+ if (MF.getFunction()->optForSize())
+ Changed |= removeRedundantAddrCalc(LEAs);
}
return Changed;
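The hunks above replace the flat LEA list with a `MemOpMap` keyed by the LEA's memory-operand signature, so the redundancy searches only compare candidates that compute compatible addresses. A minimal sketch of the bucketing idea, with `MemOpKey` reduced to a hypothetical base/index pair (the real key also folds in the scale and displacement operands):

```cpp
#include <map>
#include <utility>
#include <vector>

// Hypothetical stand-ins for the pass's types; the real MemOpKey is built
// from the LEA's base, scale, index and symbolic-displacement operands.
struct Instr { unsigned Base, Index; };
using MemOpKey = std::pair<unsigned, unsigned>;
using MemOpMap = std::map<MemOpKey, std::vector<Instr *>>;

static MemOpKey getMemOpKey(Instr &I) {
  return {I.Base, I.Index}; // simplified: ignores scale and displacement
}

// Bucketing LEAs by key means the pairwise scans in removeRedundantLEAs and
// chooseBestLEA run inside one bucket instead of over every LEA in the block.
static void findLEAs(std::vector<Instr> &Block, MemOpMap &LEAs) {
  for (Instr &I : Block)
    LEAs[getMemOpKey(I)].push_back(&I);
}
```

With this shape, the `LEAs[getMemOpKey(MI, MemOpNo)]` lookup in removeRedundantAddrCalc yields exactly the LEAs worth passing to chooseBestLEA.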
diff --git a/lib/Target/X86/X86PadShortFunction.cpp b/lib/Target/X86/X86PadShortFunction.cpp
index 0f425e28fa7d5..62a9aafc2cf36 100644
--- a/lib/Target/X86/X86PadShortFunction.cpp
+++ b/lib/Target/X86/X86PadShortFunction.cpp
@@ -55,6 +55,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "X86 Atom pad short functions";
}
@@ -93,6 +98,9 @@ FunctionPass *llvm::createX86PadShortFunctions() {
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits.
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
if (MF.getFunction()->optForSize()) {
return false;
}
@@ -179,13 +187,11 @@ bool PadShortFunc::cyclesUntilReturn(MachineBasicBlock *MBB,
unsigned int CyclesToEnd = 0;
- for (MachineBasicBlock::iterator MBBI = MBB->begin();
- MBBI != MBB->end(); ++MBBI) {
- MachineInstr *MI = MBBI;
+ for (MachineInstr &MI : *MBB) {
// Mark basic blocks with a return instruction. Calls to other
// functions do not count because the called function will be padded,
// if necessary.
- if (MI->isReturn() && !MI->isCall()) {
+ if (MI.isReturn() && !MI.isCall()) {
VisitedBBs[MBB] = VisitedBBInfo(true, CyclesToEnd);
Cycles += CyclesToEnd;
return true;
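Both X86PadShortFunction changes follow a pattern this patch set applies to several post-RA machine passes: declare through getRequiredProperties that virtual registers must already be allocated, and bail out through skipFunction when per-function optimization is disabled. A hedged skeleton of that pattern, with an illustrative pass name:

```cpp
#include "llvm/CodeGen/MachineFunctionPass.h"
using namespace llvm;

namespace {
// Sketch only; ExamplePass is a placeholder, not a pass added by this patch.
struct ExamplePass : public MachineFunctionPass {
  static char ID;
  ExamplePass() : MachineFunctionPass(ID) {}

  // Declare that the pass runs after register allocation, so it can assume
  // no virtual registers remain in the function.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::AllVRegsAllocated);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Honor optnone and opt-bisect before touching anything.
    if (skipFunction(*MF.getFunction()))
      return false;
    // ... the actual transformation would go here ...
    return false;
  }
};
char ExamplePass::ID = 0;
} // end anonymous namespace
```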
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 274b566885584..86750633aecc6 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -52,7 +52,7 @@ X86RegisterInfo::X86RegisterInfo(const Triple &TT)
X86_MC::getDwarfRegFlavour(TT, false),
X86_MC::getDwarfRegFlavour(TT, true),
(TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
- X86_MC::InitLLVM2SEHRegisterMapping(this);
+ X86_MC::initLLVMToSEHAndCVRegMapping(this);
// Cache some information.
Is64Bit = TT.isArch64Bit();
@@ -162,10 +162,23 @@ X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
case 0: // Normal GPRs.
if (Subtarget.isTarget64BitLP64())
return &X86::GR64RegClass;
+    // If the target is 64-bit but we have been told to use 32-bit addresses,
+    // we can still use a 64-bit register as long as we know the high bits
+    // are zero.
+    // Reflect that in the returned register class.
+ if (Is64Bit) {
+      // When the target also allows a 64-bit frame pointer and we do have a
+      // frame, it is fine to use it for address accesses as well.
+ const X86FrameLowering *TFI = getFrameLowering(MF);
+ return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
+ ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
+ : &X86::LOW32_ADDR_ACCESSRegClass;
+ }
return &X86::GR32RegClass;
case 1: // Normal GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOSPRegClass;
+ // NOSP does not contain RIP, so no special case here.
return &X86::GR32_NOSPRegClass;
case 2: // NOREX GPRs.
if (Subtarget.isTarget64BitLP64())
@@ -174,6 +187,7 @@ X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOREX_NOSPRegClass;
+ // NOSP does not contain RIP, so no special case here.
return &X86::GR32_NOREX_NOSPRegClass;
case 4: // Available for tailcall (not callee-saved GPRs).
return getGPRsForTailCall(MF);
@@ -280,15 +294,19 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_64_SaveList;
case CallingConv::X86_INTR:
if (Is64Bit) {
+ if (HasAVX512)
+ return CSR_64_AllRegs_AVX512_SaveList;
if (HasAVX)
return CSR_64_AllRegs_AVX_SaveList;
- else
- return CSR_64_AllRegs_SaveList;
+ return CSR_64_AllRegs_SaveList;
} else {
+ if (HasAVX512)
+ return CSR_32_AllRegs_AVX512_SaveList;
+ if (HasAVX)
+ return CSR_32_AllRegs_AVX_SaveList;
if (HasSSE)
return CSR_32_AllRegs_SSE_SaveList;
- else
- return CSR_32_AllRegs_SaveList;
+ return CSR_32_AllRegs_SaveList;
}
default:
break;
@@ -299,6 +317,10 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_Win64_SaveList;
if (CallsEHReturn)
return CSR_64EHRet_SaveList;
+ if (Subtarget.getTargetLowering()->supportSwiftError() &&
+ MF->getFunction()->getAttributes().hasAttrSomewhere(
+ Attribute::SwiftError))
+ return CSR_64_SwiftError_SaveList;
return CSR_64_SaveList;
}
if (CallsEHReturn)
@@ -366,18 +388,22 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
return CSR_64_RegMask;
case CallingConv::X86_INTR:
if (Is64Bit) {
+ if (HasAVX512)
+ return CSR_64_AllRegs_AVX512_RegMask;
if (HasAVX)
return CSR_64_AllRegs_AVX_RegMask;
- else
- return CSR_64_AllRegs_RegMask;
+ return CSR_64_AllRegs_RegMask;
} else {
+ if (HasAVX512)
+ return CSR_32_AllRegs_AVX512_RegMask;
+ if (HasAVX)
+ return CSR_32_AllRegs_AVX_RegMask;
if (HasSSE)
return CSR_32_AllRegs_SSE_RegMask;
- else
- return CSR_32_AllRegs_RegMask;
+ return CSR_32_AllRegs_RegMask;
}
- default:
- break;
+ default:
+ break;
}
// Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
@@ -385,6 +411,10 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
if (Is64Bit) {
if (IsWin64)
return CSR_Win64_RegMask;
+ if (Subtarget.getTargetLowering()->supportSwiftError() &&
+ MF.getFunction()->getAttributes().hasAttrSomewhere(
+ Attribute::SwiftError))
+ return CSR_64_SwiftError_RegMask;
return CSR_64_RegMask;
}
return CSR_32_RegMask;
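The calling-convention hunks above all converge on one shape: probe the widest register file first (AVX-512, then AVX, then SSE) and fall through to the plain list. A condensed sketch of that ladder; the CSR_* arrays below are placeholders for the TableGen-generated lists from X86CallingConv.td:

```cpp
#include <cstdint>
using MCPhysReg = uint16_t; // the typedef the ClobberSet hunks switch to

// Placeholder arrays; the real ones are emitted by TableGen.
static const MCPhysReg CSR_64_AllRegs_AVX512_SaveList[] = {0},
                       CSR_64_AllRegs_AVX_SaveList[] = {0},
                       CSR_64_AllRegs_SaveList[] = {0},
                       CSR_32_AllRegs_AVX512_SaveList[] = {0},
                       CSR_32_AllRegs_AVX_SaveList[] = {0},
                       CSR_32_AllRegs_SSE_SaveList[] = {0},
                       CSR_32_AllRegs_SaveList[] = {0};

// Feature-ordered selection for the X86_INTR save lists shown above; checking
// the most inclusive feature first keeps every branch a single early return.
const MCPhysReg *pickIntrSaveList(bool Is64Bit, bool HasAVX512, bool HasAVX,
                                  bool HasSSE) {
  if (Is64Bit) {
    if (HasAVX512) return CSR_64_AllRegs_AVX512_SaveList;
    if (HasAVX)    return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  }
  if (HasAVX512) return CSR_32_AllRegs_AVX512_SaveList;
  if (HasAVX)    return CSR_32_AllRegs_AVX_SaveList;
  if (HasSSE)    return CSR_32_AllRegs_SSE_SaveList;
  return CSR_32_AllRegs_SaveList;
}
```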
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index 56f0d9352d301..373f9b4c65f2f 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -226,14 +226,14 @@ let SubRegIndices = [sub_ymm] in {
}
// Mask Registers, used by AVX-512 instructions.
-def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, -2, -2]>;
-def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, -2, -2]>;
-def K2 : X86Reg<"k2", 2>, DwarfRegNum<[120, -2, -2]>;
-def K3 : X86Reg<"k3", 3>, DwarfRegNum<[121, -2, -2]>;
-def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, -2, -2]>;
-def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, -2, -2]>;
-def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, -2, -2]>;
-def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, -2, -2]>;
+def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, 93, 93]>;
+def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, 94, 94]>;
+def K2 : X86Reg<"k2", 2>, DwarfRegNum<[120, 95, 95]>;
+def K3 : X86Reg<"k3", 3>, DwarfRegNum<[121, 96, 96]>;
+def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, 97, 97]>;
+def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, 98, 98]>;
+def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, 99, 99]>;
+def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, 100, 100]>;
// Floating point stack registers. These don't map one-to-one to the FP
// pseudo registers, but we still mark them as aliasing FP registers. That
@@ -415,6 +415,26 @@ def GR32_NOREX_NOSP : RegisterClass<"X86", [i32], 32,
def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
(and GR64_NOREX, GR64_NOSP)>;
+// Register classes used for ABIs that use 32-bit address accesses,
+// while using the whole x86_64 ISA.
+
+// In such cases, it is fine to use RIP as we are sure the high 32
+// bits are not set. We do not need variants for NOSP as RIP is not
+// allowed there.
+// RIP is not spilled anywhere for now, so stick to 32-bit alignment
+// to save on memory space.
+// FIXME: We could allow all 64-bit registers, but we would need
+// something to check that the high 32 bits are not set,
+// which we do not have right now.
+def LOW32_ADDR_ACCESS : RegisterClass<"X86", [i32], 32, (add GR32, RIP)>;
+
+// When RBP is used as a base pointer in a 32-bit address environment,
+// it is also safe to use the full register to access addresses.
+// Since RBP will never be spilled, stick to 32-bit alignment to save
+// on memory consumption.
+def LOW32_ADDR_ACCESS_RBP : RegisterClass<"X86", [i32], 32,
+ (add LOW32_ADDR_ACCESS, RBP)>;
+
// A class to support the 'A' assembler constraint: EAX then EDX.
def GR32_AD : RegisterClass<"X86", [i32], 32, (add EAX, EDX)>;
@@ -451,6 +471,17 @@ def VR128 : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
def VR256 : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 15)>;
+// Special classes that help the assembly parser choose some alternate
+// instructions to favor 2-byte VEX encodings.
+def VR128L : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
+ 128, (sequence "XMM%u", 0, 7)>;
+def VR128H : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
+ 128, (sequence "XMM%u", 8, 15)>;
+def VR256L : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
+ 256, (sequence "YMM%u", 0, 7)>;
+def VR256H : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
+ 256, (sequence "YMM%u", 8, 15)>;
+
// Status flags registers.
def CCR : RegisterClass<"X86", [i32], 32, (add EFLAGS)> {
let CopyCost = -1; // Don't allow copying of status registers.
@@ -477,18 +508,18 @@ def VR256X : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 31)>;
// Mask registers
-def VK1 : RegisterClass<"X86", [i1], 8, (sequence "K%u", 0, 7)> {let Size = 8;}
-def VK2 : RegisterClass<"X86", [v2i1], 8, (add VK1)> {let Size = 8;}
-def VK4 : RegisterClass<"X86", [v4i1], 8, (add VK2)> {let Size = 8;}
-def VK8 : RegisterClass<"X86", [v8i1], 8, (add VK4)> {let Size = 8;}
+def VK1 : RegisterClass<"X86", [i1], 16, (sequence "K%u", 0, 7)> {let Size = 16;}
+def VK2 : RegisterClass<"X86", [v2i1], 16, (add VK1)> {let Size = 16;}
+def VK4 : RegisterClass<"X86", [v4i1], 16, (add VK2)> {let Size = 16;}
+def VK8 : RegisterClass<"X86", [v8i1], 16, (add VK4)> {let Size = 16;}
def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)> {let Size = 16;}
def VK32 : RegisterClass<"X86", [v32i1], 32, (add VK16)> {let Size = 32;}
def VK64 : RegisterClass<"X86", [v64i1], 64, (add VK32)> {let Size = 64;}
-def VK1WM : RegisterClass<"X86", [i1], 8, (sub VK1, K0)> {let Size = 8;}
-def VK2WM : RegisterClass<"X86", [v2i1], 8, (sub VK2, K0)> {let Size = 8;}
-def VK4WM : RegisterClass<"X86", [v4i1], 8, (sub VK4, K0)> {let Size = 8;}
-def VK8WM : RegisterClass<"X86", [v8i1], 8, (sub VK8, K0)> {let Size = 8;}
+def VK1WM : RegisterClass<"X86", [i1], 16, (sub VK1, K0)> {let Size = 16;}
+def VK2WM : RegisterClass<"X86", [v2i1], 16, (sub VK2, K0)> {let Size = 16;}
+def VK4WM : RegisterClass<"X86", [v4i1], 16, (sub VK4, K0)> {let Size = 16;}
+def VK8WM : RegisterClass<"X86", [v8i1], 16, (sub VK8, K0)> {let Size = 16;}
def VK16WM : RegisterClass<"X86", [v16i1], 16, (add VK8WM)> {let Size = 16;}
def VK32WM : RegisterClass<"X86", [v32i1], 32, (add VK16WM)> {let Size = 32;}
def VK64WM : RegisterClass<"X86", [v64i1], 64, (add VK32WM)> {let Size = 64;}
diff --git a/lib/Target/X86/X86Schedule.td b/lib/Target/X86/X86Schedule.td
index a261356afe6a6..35257f89100ca 100644
--- a/lib/Target/X86/X86Schedule.td
+++ b/lib/Target/X86/X86Schedule.td
@@ -364,6 +364,8 @@ def IIC_SSE_PALIGNRR : InstrItinClass;
def IIC_SSE_PALIGNRM : InstrItinClass;
def IIC_SSE_MWAIT : InstrItinClass;
def IIC_SSE_MONITOR : InstrItinClass;
+def IIC_SSE_MWAITX : InstrItinClass;
+def IIC_SSE_MONITORX : InstrItinClass;
def IIC_SSE_PREFETCH : InstrItinClass;
def IIC_SSE_PAUSE : InstrItinClass;
@@ -633,13 +635,22 @@ def IIC_NOP : InstrItinClass;
// latencies. Since these latencies are not used for pipeline hazards,
// they do not need to be exact.
//
-// The GenericModel contains no instruction itineraries.
-def GenericModel : SchedMachineModel {
+// The GenericX86Model contains no instruction itineraries
+// and disables PostRAScheduler.
+class GenericX86Model : SchedMachineModel {
let IssueWidth = 4;
let MicroOpBufferSize = 32;
let LoadLatency = 4;
let HighLatency = 10;
let PostRAScheduler = 0;
+ let CompleteModel = 0;
+}
+
+def GenericModel : GenericX86Model;
+
+// Define a model with the PostRAScheduler enabled.
+def GenericPostRAModel : GenericX86Model {
+ let PostRAScheduler = 1;
}
include "X86ScheduleAtom.td"
diff --git a/lib/Target/X86/X86ScheduleAtom.td b/lib/Target/X86/X86ScheduleAtom.td
index 4c559c9c1798d..a5b440182aa91 100644
--- a/lib/Target/X86/X86ScheduleAtom.td
+++ b/lib/Target/X86/X86ScheduleAtom.td
@@ -544,6 +544,7 @@ def AtomModel : SchedMachineModel {
// simple loops, expand by a small factor to hide the backedge cost.
let LoopMicroOpBufferSize = 10;
let PostRAScheduler = 1;
+ let CompleteModel = 0;
let Itineraries = AtomItineraries;
}
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index b1a01614b4a1b..d02859b3dcbd4 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
#define DEBUG_TYPE "x86-selectiondag-info"
bool X86SelectionDAGInfo::isBaseRegConflictPossible(
- SelectionDAG &DAG, ArrayRef<unsigned> ClobberSet) const {
+ SelectionDAG &DAG, ArrayRef<MCPhysReg> ClobberSet) const {
// We cannot use TRI->hasBasePointer() until *after* we select all basic
// blocks. Legalization may introduce new stack temporaries with large
// alignment requirements. Fall back to generic code if there are any
@@ -45,7 +45,7 @@ bool X86SelectionDAGInfo::isBaseRegConflictPossible(
}
SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
- SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
@@ -54,8 +54,8 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
#ifndef NDEBUG
// If the base register might conflict with our physical registers, bail out.
- const unsigned ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
- X86::ECX, X86::EAX, X86::EDI};
+ const MCPhysReg ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
+ X86::ECX, X86::EAX, X86::EDI};
assert(!isBaseRegConflictPossible(DAG, ClobberSet));
#endif
@@ -87,8 +87,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(Chain)
.setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
- DAG.getExternalSymbol(bzeroEntry, IntPtr), std::move(Args),
- 0)
+ DAG.getExternalSymbol(bzeroEntry, IntPtr), std::move(Args))
.setDiscardResult();
std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
@@ -195,7 +194,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
}
SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
- SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
// This requires the copy size to be a constant, preferably
@@ -222,8 +221,8 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
return SDValue();
// If the base register might conflict with our physical registers, bail out.
- const unsigned ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
- X86::ECX, X86::ESI, X86::EDI};
+ const MCPhysReg ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
+ X86::ECX, X86::ESI, X86::EDI};
if (isBaseRegConflictPossible(DAG, ClobberSet))
return SDValue();
diff --git a/lib/Target/X86/X86SelectionDAGInfo.h b/lib/Target/X86/X86SelectionDAGInfo.h
index 961bd8c8d5ef1..f4a285a5f916f 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.h
+++ b/lib/Target/X86/X86SelectionDAGInfo.h
@@ -7,14 +7,15 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the X86 subclass for TargetSelectionDAGInfo.
+// This file defines the X86 subclass for SelectionDAGTargetInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_X86_X86SELECTIONDAGINFO_H
#define LLVM_LIB_TARGET_X86_X86SELECTIONDAGINFO_H
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
namespace llvm {
@@ -22,27 +23,24 @@ class X86TargetLowering;
class X86TargetMachine;
class X86Subtarget;
-class X86SelectionDAGInfo : public TargetSelectionDAGInfo {
+class X86SelectionDAGInfo : public SelectionDAGTargetInfo {
/// Returns true if it is possible for the base register to conflict with the
/// given set of clobbers for a memory intrinsic.
bool isBaseRegConflictPossible(SelectionDAG &DAG,
- ArrayRef<unsigned> ClobberSet) const;
+ ArrayRef<MCPhysReg> ClobberSet) const;
public:
explicit X86SelectionDAGInfo() = default;
- SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile,
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const override;
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
+ bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const override;
};
diff --git a/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp b/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp
index ef16c5bdbfd8f..1adc92cfda633 100644
--- a/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp
+++ b/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp
@@ -40,24 +40,43 @@ void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
assert(MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512);
#endif
- // This is a straightforward byte vector.
- if (MaskTy->isVectorTy() && MaskTy->getVectorElementType()->isIntegerTy(8)) {
- int NumElements = MaskTy->getVectorNumElements();
- ShuffleMask.reserve(NumElements);
+ if (!MaskTy->isVectorTy())
+ return;
+ int NumElts = MaskTy->getVectorNumElements();
+
+ Type *EltTy = MaskTy->getVectorElementType();
+ if (!EltTy->isIntegerTy())
+ return;
- for (int i = 0; i < NumElements; ++i) {
+ // The shuffle mask requires a byte vector - decode cases with
+ // wider elements as well.
+ unsigned BitWidth = cast<IntegerType>(EltTy)->getBitWidth();
+ if ((BitWidth % 8) != 0)
+ return;
+
+ int Scale = BitWidth / 8;
+ int NumBytes = NumElts * Scale;
+ ShuffleMask.reserve(NumBytes);
+
+ for (int i = 0; i != NumElts; ++i) {
+ Constant *COp = C->getAggregateElement(i);
+ if (!COp) {
+ ShuffleMask.clear();
+ return;
+ } else if (isa<UndefValue>(COp)) {
+ ShuffleMask.append(Scale, SM_SentinelUndef);
+ continue;
+ }
+
+ APInt APElt = cast<ConstantInt>(COp)->getValue();
+ for (int j = 0; j != Scale; ++j) {
// For AVX vectors with 32 bytes the base of the shuffle is the 16-byte
// lane of the vector we're inside.
- int Base = i & ~0xf;
- Constant *COp = C->getAggregateElement(i);
- if (!COp) {
- ShuffleMask.clear();
- return;
- } else if (isa<UndefValue>(COp)) {
- ShuffleMask.push_back(SM_SentinelUndef);
- continue;
- }
- uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
+ int Base = ((i * Scale) + j) & ~0xf;
+
+ uint64_t Element = APElt.getLoBits(8).getZExtValue();
+ APElt = APElt.lshr(8);
+
// If the high bit (7) of the byte is set, the element is zeroed.
if (Element & (1 << 7))
ShuffleMask.push_back(SM_SentinelZero);
@@ -68,7 +87,8 @@ void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
}
}
}
- // TODO: Handle funny-looking vectors too.
+
+ assert(NumBytes == (int)ShuffleMask.size() && "Unexpected shuffle mask size");
}
void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
@@ -84,9 +104,11 @@ void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
// <4 x i32> <i32 -2147483648, i32 -2147483648,
// i32 -2147483648, i32 -2147483648>
- unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
+ if (ElSize != 32 && ElSize != 64)
+ return;
- if (MaskTySize != 128 && MaskTySize != 256) // FIXME: Add support for AVX-512.
+ unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
+ if (MaskTySize != 128 && MaskTySize != 256 && MaskTySize != 512)
return;
// Only support vector types.
@@ -99,14 +121,15 @@ void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
return;
// Support any element type from byte up to element size.
- // This is necesary primarily because 64-bit elements get split to 32-bit
+ // This is necessary primarily because 64-bit elements get split to 32-bit
// in the constant pool on 32-bit target.
unsigned EltTySize = VecEltTy->getIntegerBitWidth();
if (EltTySize < 8 || EltTySize > ElSize)
return;
unsigned NumElements = MaskTySize / ElSize;
- assert((NumElements == 2 || NumElements == 4 || NumElements == 8) &&
+ assert((NumElements == 2 || NumElements == 4 || NumElements == 8 ||
+ NumElements == 16) &&
"Unexpected number of vector elements.");
ShuffleMask.reserve(NumElements);
unsigned NumElementsPerLane = 128 / ElSize;
@@ -133,12 +156,154 @@ void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
// TODO: Handle funny-looking vectors too.
}
+void DecodeVPERMIL2PMask(const Constant *C, unsigned M2Z, unsigned ElSize,
+ SmallVectorImpl<int> &ShuffleMask) {
+ Type *MaskTy = C->getType();
+
+ unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
+ if (MaskTySize != 128 && MaskTySize != 256)
+ return;
+
+ // Only support vector types.
+ if (!MaskTy->isVectorTy())
+ return;
+
+  // Make sure it's an integer type.
+ Type *VecEltTy = MaskTy->getVectorElementType();
+ if (!VecEltTy->isIntegerTy())
+ return;
+
+ // Support any element type from byte up to element size.
+ // This is necessary primarily because 64-bit elements get split to 32-bit
+ // in the constant pool on 32-bit target.
+ unsigned EltTySize = VecEltTy->getIntegerBitWidth();
+ if (EltTySize < 8 || EltTySize > ElSize)
+ return;
+
+ unsigned NumElements = MaskTySize / ElSize;
+ assert((NumElements == 2 || NumElements == 4 || NumElements == 8) &&
+ "Unexpected number of vector elements.");
+ ShuffleMask.reserve(NumElements);
+ unsigned NumElementsPerLane = 128 / ElSize;
+ unsigned Factor = ElSize / EltTySize;
+
+ for (unsigned i = 0; i < NumElements; ++i) {
+ Constant *COp = C->getAggregateElement(i * Factor);
+ if (!COp) {
+ ShuffleMask.clear();
+ return;
+ } else if (isa<UndefValue>(COp)) {
+ ShuffleMask.push_back(SM_SentinelUndef);
+ continue;
+ }
+
+ // VPERMIL2 Operation.
+ // Bits[3] - Match Bit.
+ // Bits[2:1] - (Per Lane) PD Shuffle Mask.
+ // Bits[2:0] - (Per Lane) PS Shuffle Mask.
+ uint64_t Selector = cast<ConstantInt>(COp)->getZExtValue();
+ unsigned MatchBit = (Selector >> 3) & 0x1;
+
+ // M2Z[0:1] MatchBit
+ // 0Xb X Source selected by Selector index.
+ // 10b 0 Source selected by Selector index.
+ // 10b 1 Zero.
+ // 11b 0 Zero.
+ // 11b 1 Source selected by Selector index.
+ if ((M2Z & 0x2) != 0u && MatchBit != (M2Z & 0x1)) {
+ ShuffleMask.push_back(SM_SentinelZero);
+ continue;
+ }
+
+ int Index = i & ~(NumElementsPerLane - 1);
+ if (ElSize == 64)
+ Index += (Selector >> 1) & 0x1;
+ else
+ Index += Selector & 0x3;
+
+ int Src = (Selector >> 2) & 0x1;
+ Index += Src * NumElements;
+ ShuffleMask.push_back(Index);
+ }
+
+ // TODO: Handle funny-looking vectors too.
+}
+
+void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
+ Type *MaskTy = C->getType();
+ assert(MaskTy->getPrimitiveSizeInBits() == 128);
+
+ // Only support vector types.
+ if (!MaskTy->isVectorTy())
+ return;
+
+  // Make sure it's an integer type.
+ Type *VecEltTy = MaskTy->getVectorElementType();
+ if (!VecEltTy->isIntegerTy())
+ return;
+
+ // The shuffle mask requires a byte vector - decode cases with
+ // wider elements as well.
+ unsigned BitWidth = cast<IntegerType>(VecEltTy)->getBitWidth();
+ if ((BitWidth % 8) != 0)
+ return;
+
+ int NumElts = MaskTy->getVectorNumElements();
+ int Scale = BitWidth / 8;
+ int NumBytes = NumElts * Scale;
+ ShuffleMask.reserve(NumBytes);
+
+ for (int i = 0; i != NumElts; ++i) {
+ Constant *COp = C->getAggregateElement(i);
+ if (!COp) {
+ ShuffleMask.clear();
+ return;
+ } else if (isa<UndefValue>(COp)) {
+ ShuffleMask.append(Scale, SM_SentinelUndef);
+ continue;
+ }
+
+ // VPPERM Operation
+ // Bits[4:0] - Byte Index (0 - 31)
+ // Bits[7:5] - Permute Operation
+ //
+ // Permute Operation:
+ // 0 - Source byte (no logical operation).
+ // 1 - Invert source byte.
+ // 2 - Bit reverse of source byte.
+ // 3 - Bit reverse of inverted source byte.
+  //  4 - 00h (zero-fill).
+  //  5 - FFh (ones-fill).
+ // 6 - Most significant bit of source byte replicated in all bit positions.
+ // 7 - Invert most significant bit of source byte and replicate in all bit positions.
+ APInt MaskElt = cast<ConstantInt>(COp)->getValue();
+ for (int j = 0; j != Scale; ++j) {
+ APInt Index = MaskElt.getLoBits(5);
+ APInt PermuteOp = MaskElt.lshr(5).getLoBits(3);
+ MaskElt = MaskElt.lshr(8);
+
+ if (PermuteOp == 4) {
+ ShuffleMask.push_back(SM_SentinelZero);
+ continue;
+ }
+ if (PermuteOp != 0) {
+ ShuffleMask.clear();
+ return;
+ }
+ ShuffleMask.push_back((int)Index.getZExtValue());
+ }
+ }
+
+ assert(NumBytes == (int)ShuffleMask.size() && "Unexpected shuffle mask size");
+}
+
void DecodeVPERMVMask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
if (MaskTy->isVectorTy()) {
unsigned NumElements = MaskTy->getVectorNumElements();
if (NumElements == VT.getVectorNumElements()) {
+ unsigned EltMaskSize = Log2_64(NumElements);
for (unsigned i = 0; i < NumElements; ++i) {
Constant *COp = C->getAggregateElement(i);
if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp))) {
@@ -148,9 +313,9 @@ void DecodeVPERMVMask(const Constant *C, MVT VT,
if (isa<UndefValue>(COp))
ShuffleMask.push_back(SM_SentinelUndef);
else {
- uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
- Element &= (1 << NumElements) - 1;
- ShuffleMask.push_back(Element);
+ APInt Element = cast<ConstantInt>(COp)->getValue();
+ Element = Element.getLoBits(EltMaskSize);
+ ShuffleMask.push_back(Element.getZExtValue());
}
}
}
@@ -171,6 +336,7 @@ void DecodeVPERMV3Mask(const Constant *C, MVT VT,
Type *MaskTy = C->getType();
unsigned NumElements = MaskTy->getVectorNumElements();
if (NumElements == VT.getVectorNumElements()) {
+ unsigned EltMaskSize = Log2_64(NumElements * 2);
for (unsigned i = 0; i < NumElements; ++i) {
Constant *COp = C->getAggregateElement(i);
if (!COp) {
@@ -180,9 +346,9 @@ void DecodeVPERMV3Mask(const Constant *C, MVT VT,
if (isa<UndefValue>(COp))
ShuffleMask.push_back(SM_SentinelUndef);
else {
- uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
- Element &= (1 << NumElements*2) - 1;
- ShuffleMask.push_back(Element);
+ APInt Element = cast<ConstantInt>(COp)->getValue();
+ Element = Element.getLoBits(EltMaskSize);
+ ShuffleMask.push_back(Element.getZExtValue());
}
}
}
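The decoders in this file all gain the same generalization: accept any integer element width that is a multiple of eight bits, then peel each element apart one byte at a time with APInt::getLoBits(8) and lshr(8). Using APInt also avoids the undefined shift that the old `(1 << NumElements) - 1` masking risked once masks get wide. A simplified sketch of the peeling loop, assuming elements fit in uint64_t (the real code keeps APInt because constant-pool elements can be wider):

```cpp
#include <cstdint>
#include <vector>

const int SM_SentinelZero = -2; // stand-in for the decoder's zero sentinel

// Decode one wide PSHUFB mask element into Scale bytes, low byte first,
// mirroring the getLoBits(8)/lshr(8) loop in DecodePSHUFBMask above.
void decodeWideElt(uint64_t Elt, int Scale, int EltIdx,
                   std::vector<int> &Mask) {
  for (int j = 0; j != Scale; ++j) {
    uint64_t Byte = Elt & 0xff; // APInt::getLoBits(8)
    Elt >>= 8;                  // APInt::lshr(8)
    // PSHUFB zeroes the destination byte when bit 7 of the control is set.
    if (Byte & 0x80) {
      Mask.push_back(SM_SentinelZero);
      continue;
    }
    // Indices are relative to the 16-byte lane this byte lives in.
    int Base = ((EltIdx * Scale) + j) & ~0xf;
    Mask.push_back(Base + (int)(Byte & 0xf));
  }
}
```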
diff --git a/lib/Target/X86/X86ShuffleDecodeConstantPool.h b/lib/Target/X86/X86ShuffleDecodeConstantPool.h
index bcf46322c8cd5..d2565b849807e 100644
--- a/lib/Target/X86/X86ShuffleDecodeConstantPool.h
+++ b/lib/Target/X86/X86ShuffleDecodeConstantPool.h
@@ -25,18 +25,25 @@ namespace llvm {
class Constant;
class MVT;
-/// \brief Decode a PSHUFB mask from an IR-level vector constant.
+/// Decode a PSHUFB mask from an IR-level vector constant.
void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERMILP variable mask from an IR-level vector constant.
+/// Decode a VPERMILP variable mask from an IR-level vector constant.
void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERM W/D/Q/PS/PD mask from an IR-level vector constant.
+/// Decode a VPERMIL2P variable mask from an IR-level vector constant.
+void DecodeVPERMIL2PMask(const Constant *C, unsigned MatchImm, unsigned ElSize,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a VPPERM variable mask from an IR-level vector constant.
+void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a VPERM W/D/Q/PS/PD mask from an IR-level vector constant.
void DecodeVPERMVMask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERMT2 W/D/Q/PS/PD mask from an IR-level vector constant.
+/// Decode a VPERMT2 W/D/Q/PS/PD mask from an IR-level vector constant.
void DecodeVPERMV3Mask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask);
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 8ef08c960f0b7..8f77682d22766 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -46,105 +46,99 @@ X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
/// Classify a blockaddress reference for the current subtarget according to how
/// we should reference it in a non-pcrel context.
-unsigned char X86Subtarget::ClassifyBlockAddressReference() const {
- if (isPICStyleGOT()) // 32-bit ELF targets.
- return X86II::MO_GOTOFF;
-
- if (isPICStyleStubPIC()) // Darwin/32 in PIC mode.
- return X86II::MO_PIC_BASE_OFFSET;
-
- // Direct static reference to label.
- return X86II::MO_NO_FLAG;
+unsigned char X86Subtarget::classifyBlockAddressReference() const {
+ return classifyLocalReference(nullptr);
}
/// Classify a global variable reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
-unsigned char X86Subtarget::
-ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
- // DLLImport only exists on windows, it is implemented as a load from a
- // DLLIMPORT stub.
- if (GV->hasDLLImportStorageClass())
- return X86II::MO_DLLIMPORT;
+unsigned char
+X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const {
+ return classifyGlobalReference(GV, *GV->getParent());
+}
- bool isDef = GV->isStrongDefinitionForLinker();
+unsigned char
+X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
+ // 64 bits can use %rip addressing for anything local.
+ if (is64Bit())
+ return X86II::MO_NO_FLAG;
- // X86-64 in PIC mode.
- if (isPICStyleRIPRel()) {
- // Large model never uses stubs.
- if (TM.getCodeModel() == CodeModel::Large)
- return X86II::MO_NO_FLAG;
+ // If this is for a position dependent executable, the static linker can
+ // figure it out.
+ if (!isPositionIndependent())
+ return X86II::MO_NO_FLAG;
- if (isTargetDarwin()) {
- // If symbol visibility is hidden, the extra load is not needed if
- // target is x86-64 or the symbol is definitely defined in the current
- // translation unit.
- if (GV->hasDefaultVisibility() && !isDef)
- return X86II::MO_GOTPCREL;
- } else if (!isTargetWin64()) {
- assert(isTargetELF() && "Unknown rip-relative target");
+ // The COFF dynamic linker just patches the executable sections.
+ if (isTargetCOFF())
+ return X86II::MO_NO_FLAG;
- // Extra load is needed for all externally visible.
- if (!GV->hasLocalLinkage() && GV->hasDefaultVisibility())
- return X86II::MO_GOTPCREL;
- }
+ if (isTargetDarwin()) {
+    // 32-bit Mach-O has no relocation for a-b if a is undefined, even if
+    // b is in the section that is being relocated.
+    // This means we have to use a load even for GVs that are known to be
+    // local to the DSO.
+ if (GV && (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
+ return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
- return X86II::MO_NO_FLAG;
+ return X86II::MO_PIC_BASE_OFFSET;
}
- if (isPICStyleGOT()) { // 32-bit ELF targets.
- // Extra load is needed for all externally visible.
- if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
- return X86II::MO_GOTOFF;
- return X86II::MO_GOT;
- }
+ return X86II::MO_GOTOFF;
+}
- if (isPICStyleStubPIC()) { // Darwin/32 in PIC mode.
- // Determine whether we have a stub reference and/or whether the reference
- // is relative to the PIC base or not.
+unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
+ const Module &M) const {
+ // Large model never uses stubs.
+ if (TM.getCodeModel() == CodeModel::Large)
+ return X86II::MO_NO_FLAG;
- // If this is a strong reference to a definition, it is definitely not
- // through a stub.
- if (isDef)
- return X86II::MO_PIC_BASE_OFFSET;
+ if (TM.shouldAssumeDSOLocal(M, GV))
+ return classifyLocalReference(GV);
- // Unless we have a symbol with hidden visibility, we have to go through a
- // normal $non_lazy_ptr stub because this symbol might be resolved late.
- if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
- return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
+ if (isTargetCOFF())
+ return X86II::MO_DLLIMPORT;
- // If symbol visibility is hidden, we have a stub for common symbol
- // references and external declarations.
- if (GV->isDeclarationForLinker() || GV->hasCommonLinkage()) {
- // Hidden $non_lazy_ptr reference.
- return X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE;
- }
+ if (is64Bit())
+ return X86II::MO_GOTPCREL;
- // Otherwise, no stub.
- return X86II::MO_PIC_BASE_OFFSET;
+ if (isTargetDarwin()) {
+ if (!isPositionIndependent())
+ return X86II::MO_DARWIN_NONLAZY;
+ return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
}
- if (isPICStyleStubNoDynamic()) { // Darwin/32 in -mdynamic-no-pic mode.
- // Determine whether we have a stub reference.
+ return X86II::MO_GOT;
+}
- // If this is a strong reference to a definition, it is definitely not
- // through a stub.
- if (isDef)
- return X86II::MO_NO_FLAG;
+unsigned char
+X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
+ return classifyGlobalFunctionReference(GV, *GV->getParent());
+}
- // Unless we have a symbol with hidden visibility, we have to go through a
- // normal $non_lazy_ptr stub because this symbol might be resolved late.
- if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
- return X86II::MO_DARWIN_NONLAZY;
+unsigned char
+X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
+ const Module &M) const {
+ if (TM.shouldAssumeDSOLocal(M, GV))
+ return X86II::MO_NO_FLAG;
+
+ assert(!isTargetCOFF());
- // Otherwise, no stub.
+ if (isTargetELF())
+ return X86II::MO_PLT;
+
+ if (is64Bit()) {
+ auto *F = dyn_cast_or_null<Function>(GV);
+ if (F && F->hasFnAttribute(Attribute::NonLazyBind))
+ // If the function is marked as non-lazy, generate an indirect call
+ // which loads from the GOT directly. This avoids runtime overhead
+ // at the cost of eager binding (and one extra byte of encoding).
+ return X86II::MO_GOTPCREL;
return X86II::MO_NO_FLAG;
}
- // Direct static reference to global.
return X86II::MO_NO_FLAG;
}
-
/// This function returns the name of a function which has an interface like
/// the non-standard bzero function, if such a function exists on the
/// current subtarget and it is considered preferable over memset with zero
@@ -165,7 +159,7 @@ bool X86Subtarget::hasSinCos() const {
}
/// Return true if the subtarget allows calls to immediate address.
-bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const {
+bool X86Subtarget::isLegalToCallImmediateAddr() const {
// FIXME: I386 PE/COFF supports PC relative calls using IMAGE_REL_I386_REL32
// but WinCOFFObjectWriter::RecordRelocation cannot emit them. Once it does,
// the following check for Win32 should be removed.
@@ -227,18 +221,19 @@ void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
assert((!In64BitMode || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
- // Stack alignment is 16 bytes on Darwin, Linux and Solaris (both
+ // Stack alignment is 16 bytes on Darwin, Linux, kFreeBSD and Solaris (both
// 32 and 64 bit) and for all 64-bit targets.
if (StackAlignOverride)
stackAlignment = StackAlignOverride;
else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
- In64BitMode)
+ isTargetKFreeBSD() || In64BitMode)
stackAlignment = 16;
}
void X86Subtarget::initializeEnvironment() {
X86SSELevel = NoSSE;
X863DNowLevel = NoThreeDNow;
+ HasX87 = false;
HasCMov = false;
HasX86_64 = false;
HasPOPCNT = false;
@@ -261,6 +256,8 @@ void X86Subtarget::initializeEnvironment() {
HasLZCNT = false;
HasBMI = false;
HasBMI2 = false;
+ HasVBMI = false;
+ HasIFMA = false;
HasRTM = false;
HasHLE = false;
HasERI = false;
@@ -275,6 +272,7 @@ void X86Subtarget::initializeEnvironment() {
HasPRFCHW = false;
HasRDSEED = false;
HasLAHFSAHF = false;
+ HasMWAITX = false;
HasMPX = false;
IsBTMemSlow = false;
IsSHLDSlow = false;
@@ -283,6 +281,7 @@ void X86Subtarget::initializeEnvironment() {
HasSSEUnalignedMem = false;
HasCmpxchg16b = false;
UseLeaForSP = false;
+ HasFastPartialYMMWrite = false;
HasSlowDivide32 = false;
HasSlowDivide64 = false;
PadShortFunctions = false;
@@ -303,11 +302,11 @@ X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
return *this;
}
-X86Subtarget::X86Subtarget(const Triple &TT, const std::string &CPU,
- const std::string &FS, const X86TargetMachine &TM,
+X86Subtarget::X86Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
+ const X86TargetMachine &TM,
unsigned StackAlignOverride)
: X86GenSubtargetInfo(TT, CPU, FS), X86ProcFamily(Others),
- PICStyle(PICStyles::None), TargetTriple(TT),
+ PICStyle(PICStyles::None), TM(TM), TargetTriple(TT),
StackAlignOverride(StackAlignOverride),
In64BitMode(TargetTriple.getArch() == Triple::x86_64),
In32BitMode(TargetTriple.getArch() == Triple::x86 &&
@@ -317,24 +316,16 @@ X86Subtarget::X86Subtarget(const Triple &TT, const std::string &CPU,
TSInfo(), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
TLInfo(TM, *this), FrameLowering(*this, getStackAlignment()) {
// Determine the PICStyle based on the target selected.
- if (TM.getRelocationModel() == Reloc::Static) {
- // Unless we're in PIC or DynamicNoPIC mode, set the PIC style to None.
+ if (!isPositionIndependent())
setPICStyle(PICStyles::None);
- } else if (is64Bit()) {
- // PIC in 64 bit mode is always rip-rel.
+ else if (is64Bit())
setPICStyle(PICStyles::RIPRel);
- } else if (isTargetCOFF()) {
+ else if (isTargetCOFF())
setPICStyle(PICStyles::None);
- } else if (isTargetDarwin()) {
- if (TM.getRelocationModel() == Reloc::PIC_)
- setPICStyle(PICStyles::StubPIC);
- else {
- assert(TM.getRelocationModel() == Reloc::DynamicNoPIC);
- setPICStyle(PICStyles::StubDynamicNoPIC);
- }
- } else if (isTargetELF()) {
+ else if (isTargetDarwin())
+ setPICStyle(PICStyles::StubPIC);
+ else if (isTargetELF())
setPICStyle(PICStyles::GOT);
- }
}
bool X86Subtarget::enableEarlyIfConversion() const {
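The X86Subtarget.cpp rewrite collapses the old per-PIC-style case analysis into one question, TM.shouldAssumeDSOLocal, followed by a short target ladder. A condensed, self-contained sketch of the resulting flow for data references; the enum values are placeholders for the X86II::MO_* operand flags, and the DSO-local branch is simplified (the real classifyLocalReference still distinguishes Darwin and 32-bit ELF):

```cpp
// Placeholder flags; the real values live in X86II and differ from these.
enum OperandFlag {
  MO_NO_FLAG, MO_DLLIMPORT, MO_GOTPCREL,
  MO_DARWIN_NONLAZY, MO_DARWIN_NONLAZY_PIC_BASE, MO_GOT
};

// Condensed decision flow of classifyGlobalReference after the rewrite.
OperandFlag classifySketch(bool LargeModel, bool DSOLocal, bool COFF,
                           bool Is64Bit, bool Darwin, bool PIC) {
  if (LargeModel) return MO_NO_FLAG;   // large model never uses stubs
  if (DSOLocal)   return MO_NO_FLAG;   // simplified classifyLocalReference
  if (COFF)       return MO_DLLIMPORT; // load through the import thunk
  if (Is64Bit)    return MO_GOTPCREL;  // rip-relative GOT load
  if (Darwin)
    return PIC ? MO_DARWIN_NONLAZY_PIC_BASE : MO_DARWIN_NONLAZY;
  return MO_GOT;                       // 32-bit ELF
}
```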
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 13d1026dcaa0b..a274b797ca8f1 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -35,11 +35,10 @@ class TargetMachine;
///
namespace PICStyles {
enum Style {
- StubPIC, // Used on i386-darwin in -fPIC mode.
- StubDynamicNoPIC, // Used on i386-darwin in -mdynamic-no-pic mode.
- GOT, // Used on many 32-bit unices in -fPIC mode.
- RIPRel, // Used on X86-64 when not in -static mode.
- None // Set when in -static mode (not PIC or DynamicNoPIC mode).
+ StubPIC, // Used on i386-darwin in pic mode.
+    GOT,              // Used on 32-bit ELF when in pic mode.
+ RIPRel, // Used on X86-64 when in pic mode.
+ None // Set when not in pic mode.
};
}
@@ -64,12 +63,17 @@ protected:
/// Which PIC style to use
PICStyles::Style PICStyle;
+ const TargetMachine &TM;
+
/// SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, or none supported.
X86SSEEnum X86SSELevel;
/// MMX, 3DNow, 3DNow Athlon, or none supported.
X863DNowEnum X863DNowLevel;
+ /// True if the processor supports X87 instructions.
+ bool HasX87;
+
/// True if this processor has conditional move instructions
/// (generally pentium pro+).
bool HasCMov;
@@ -134,6 +138,12 @@ protected:
/// Processor has BMI2 instructions.
bool HasBMI2;
+ /// Processor has VBMI instructions.
+ bool HasVBMI;
+
+ /// Processor has Integer Fused Multiply Add
+ bool HasIFMA;
+
/// Processor has RTM instructions.
bool HasRTM;
@@ -155,6 +165,12 @@ protected:
/// Processor has LAHF/SAHF instructions.
bool HasLAHFSAHF;
+ /// Processor has MONITORX/MWAITX instructions.
+ bool HasMWAITX;
+
+ /// Processor has Prefetch with intent to Write instruction
+ bool HasPFPREFETCHWT1;
+
/// True if BT (bit test) of memory instructions are slow.
bool IsBTMemSlow;
@@ -179,6 +195,10 @@ protected:
/// the stack pointer. This is an optimization for Intel Atom processors.
bool UseLeaForSP;
+ /// True if there is no performance penalty to writing only the lower parts
+ /// of a YMM register without clearing the upper part.
+ bool HasFastPartialYMMWrite;
+
/// True if 8-bit divisions are significantly faster than
/// 32-bit divisions and should be used when possible.
bool HasSlowDivide32;
@@ -226,9 +246,30 @@ protected:
/// Processor has PKU extenstions
bool HasPKU;
- /// Processot supports MPX - Memory Protection Extensions
+ /// Processor supports MPX - Memory Protection Extensions
bool HasMPX;
+ /// Processor supports Invalidate Process-Context Identifier
+ bool HasInvPCId;
+
+ /// Processor has VM Functions
+ bool HasVMFUNC;
+
+ /// Processor has Supervisor Mode Access Protection
+ bool HasSMAP;
+
+ /// Processor has Software Guard Extensions
+ bool HasSGX;
+
+ /// Processor supports Flush Cache Line instruction
+ bool HasCLFLUSHOPT;
+
+ /// Processor has Persistent Commit feature
+ bool HasPCOMMIT;
+
+ /// Processor supports Cache Line Write Back instruction
+ bool HasCLWB;
+
/// Use software floating point for code generation.
bool UseSoftFloat;
@@ -271,7 +312,7 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
- X86Subtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
+ X86Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
const X86TargetMachine &TM, unsigned StackAlignOverride);
const X86TargetLowering *getTargetLowering() const override {
@@ -336,6 +377,7 @@ public:
PICStyles::Style getPICStyle() const { return PICStyle; }
void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
+ bool hasX87() const { return HasX87; }
bool hasCMov() const { return HasCMov; }
bool hasSSE1() const { return X86SSELevel >= SSE1; }
bool hasSSE2() const { return X86SSELevel >= SSE2; }
@@ -374,6 +416,8 @@ public:
bool hasLZCNT() const { return HasLZCNT; }
bool hasBMI() const { return HasBMI; }
bool hasBMI2() const { return HasBMI2; }
+ bool hasVBMI() const { return HasVBMI; }
+ bool hasIFMA() const { return HasIFMA; }
bool hasRTM() const { return HasRTM; }
bool hasHLE() const { return HasHLE; }
bool hasADX() const { return HasADX; }
@@ -381,6 +425,7 @@ public:
bool hasPRFCHW() const { return HasPRFCHW; }
bool hasRDSEED() const { return HasRDSEED; }
bool hasLAHFSAHF() const { return HasLAHFSAHF; }
+ bool hasMWAITX() const { return HasMWAITX; }
bool isBTMemSlow() const { return IsBTMemSlow; }
bool isSHLDSlow() const { return IsSHLDSlow; }
bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
@@ -388,6 +433,7 @@ public:
bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
bool useLeaForSP() const { return UseLeaForSP; }
+ bool hasFastPartialYMMWrite() const { return HasFastPartialYMMWrite; }
bool hasSlowDivide32() const { return HasSlowDivide32; }
bool hasSlowDivide64() const { return HasSlowDivide64; }
bool padShortFunctions() const { return PadShortFunctions; }
@@ -408,6 +454,11 @@ public:
bool isSLM() const { return X86ProcFamily == IntelSLM; }
bool useSoftFloat() const { return UseSoftFloat; }
+ /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
+ /// no-sse2). There isn't any reason to disable it if the target processor
+ /// supports it.
+ bool hasMFence() const { return hasSSE2() || is64Bit(); }
+
const Triple &getTargetTriple() const { return TargetTriple; }
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
@@ -421,6 +472,8 @@ public:
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
+ bool isTargetKFreeBSD() const { return TargetTriple.isOSKFreeBSD(); }
+ bool isTargetGlibc() const { return TargetTriple.isOSGlibc(); }
bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
@@ -463,7 +516,6 @@ public:
return !In64BitMode && (isTargetCygMing() || isTargetKnownWindowsMSVC());
}
- bool isPICStyleSet() const { return PICStyle != PICStyles::None; }
bool isPICStyleGOT() const { return PICStyle == PICStyles::GOT; }
bool isPICStyleRIPRel() const { return PICStyle == PICStyles::RIPRel; }
@@ -471,13 +523,7 @@ public:
return PICStyle == PICStyles::StubPIC;
}
- bool isPICStyleStubNoDynamic() const {
- return PICStyle == PICStyles::StubDynamicNoPIC;
- }
- bool isPICStyleStubAny() const {
- return PICStyle == PICStyles::StubDynamicNoPIC ||
- PICStyle == PICStyles::StubPIC;
- }
+ bool isPositionIndependent() const { return TM.isPositionIndependent(); }
bool isCallingConvWin64(CallingConv::ID CC) const {
switch (CC) {
@@ -502,18 +548,25 @@ public:
}
}
- /// ClassifyGlobalReference - Classify a global variable reference for the
- /// current subtarget according to how we should reference it in a non-pcrel
- /// context.
- unsigned char ClassifyGlobalReference(const GlobalValue *GV,
- const TargetMachine &TM)const;
+ /// Classify a global variable reference for the current subtarget according
+ /// to how we should reference it in a non-pcrel context.
+ unsigned char classifyLocalReference(const GlobalValue *GV) const;
+
+ unsigned char classifyGlobalReference(const GlobalValue *GV,
+ const Module &M) const;
+ unsigned char classifyGlobalReference(const GlobalValue *GV) const;
+
+ /// Classify a global function reference for the current subtarget.
+ unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
+ const Module &M) const;
+ unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;
/// Classify a blockaddress reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
- unsigned char ClassifyBlockAddressReference() const;
+ unsigned char classifyBlockAddressReference() const;
/// Return true if the subtarget allows calls to immediate address.
- bool IsLegalToCallImmediateAddr(const TargetMachine &TM) const;
+ bool isLegalToCallImmediateAddr() const;
/// This function returns the name of a function which has an interface
/// like the non-standard bzero function, if such a function exists on
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index 0e7e4c0c84a96..50c9c25a27c08 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -16,6 +16,7 @@
#include "X86TargetObjectFile.h"
#include "X86TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/CommandLine.h"
@@ -39,6 +40,7 @@ extern "C" void LLVMInitializeX86Target() {
PassRegistry &PR = *PassRegistry::getPassRegistry();
initializeWinEHStatePassPass(PR);
+ initializeFixupBWInstPassPass(PR);
}
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -73,17 +75,22 @@ static std::string computeDataLayout(const Triple &TT) {
// Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
Ret += "-i64:64";
+ else if (TT.isOSIAMCU())
+ Ret += "-i64:32-f64:32";
else
Ret += "-f64:32:64";
// Some ABIs align long double to 128 bits, others to 32.
- if (TT.isOSNaCl())
+ if (TT.isOSNaCl() || TT.isOSIAMCU())
; // No f80
else if (TT.isArch64Bit() || TT.isOSDarwin())
Ret += "-f80:128";
else
Ret += "-f80:32";
+ if (TT.isOSIAMCU())
+ Ret += "-f128:32";
+
// The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
if (TT.isArch64Bit())
Ret += "-n8:16:32:64";
@@ -91,7 +98,7 @@ static std::string computeDataLayout(const Triple &TT) {
Ret += "-n8:16:32";
// The stack is aligned to 32 bits on some ABIs and 128 bits on others.
- if (!TT.isArch64Bit() && TT.isOSWindows())
+ if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
Ret += "-a:0:32-S32";
else
Ret += "-S128";
@@ -99,22 +106,60 @@ static std::string computeDataLayout(const Triple &TT) {
return Ret;
}
-/// X86TargetMachine ctor - Create an X86 target.
+static Reloc::Model getEffectiveRelocModel(const Triple &TT,
+ Optional<Reloc::Model> RM) {
+ bool is64Bit = TT.getArch() == Triple::x86_64;
+ if (!RM.hasValue()) {
+    // Darwin defaults to PIC in 64-bit mode and dynamic-no-pic in 32-bit mode.
+    // Win64 requires rip-relative addressing, so we force it to PIC. Otherwise
+    // we use the static relocation model by default.
+ if (TT.isOSDarwin()) {
+ if (is64Bit)
+ return Reloc::PIC_;
+ return Reloc::DynamicNoPIC;
+ }
+ if (TT.isOSWindows() && is64Bit)
+ return Reloc::PIC_;
+ return Reloc::Static;
+ }
+
+ // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC
+ // is defined as a model for code which may be used in static or dynamic
+  // executables but not necessarily a shared library. On X86-32 we just
+  // compile in -static mode; on X86-64 we use PIC.
+ if (*RM == Reloc::DynamicNoPIC) {
+ if (is64Bit)
+ return Reloc::PIC_;
+ if (!TT.isOSDarwin())
+ return Reloc::Static;
+ }
+
+ // If we are on Darwin, disallow static relocation model in X86-64 mode, since
+ // the Mach-O file format doesn't support it.
+ if (*RM == Reloc::Static && TT.isOSDarwin() && is64Bit)
+ return Reloc::PIC_;
+
+ return *RM;
+}
+
+/// Create an X86 target.
///
X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, RM, CM,
- OL),
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options,
+ getEffectiveRelocModel(TT, RM), CM, OL),
TLOF(createTLOF(getTargetTriple())),
Subtarget(TT, CPU, FS, *this, Options.StackAlignmentOverride) {
// Windows stack unwinder gets confused when execution flow "falls through"
// after a call to 'noreturn' function.
// To prevent that, we emit a trap for 'unreachable' IR instructions.
// (which on X86, happens to be the 'ud2' instruction)
- if (Subtarget.isTargetWin64())
+ // On PS4, the "return address" of a 'noreturn' call must still be within
+ // the calling function, and TrapUnreachable is an easy way to get that.
+ if (Subtarget.isTargetWin64() || Subtarget.isTargetPS4())
this->Options.TrapUnreachable = true;
// By default (and when -ffast-math is on), enable estimate codegen for
@@ -137,12 +182,17 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
Attribute CPUAttr = F.getFnAttribute("target-cpu");
Attribute FSAttr = F.getFnAttribute("target-features");
- std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
- ? CPUAttr.getValueAsString().str()
- : TargetCPU;
- std::string FS = !FSAttr.hasAttribute(Attribute::None)
- ? FSAttr.getValueAsString().str()
- : TargetFS;
+ StringRef CPU = !CPUAttr.hasAttribute(Attribute::None)
+ ? CPUAttr.getValueAsString()
+ : (StringRef)TargetCPU;
+ StringRef FS = !FSAttr.hasAttribute(Attribute::None)
+ ? FSAttr.getValueAsString()
+ : (StringRef)TargetFS;
+
+ SmallString<512> Key;
+ Key.reserve(CPU.size() + FS.size());
+ Key += CPU;
+ Key += FS;
// FIXME: This is related to the code below to reset the target options,
// we need to know whether or not the soft float flag is set on the
@@ -150,14 +200,15 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
// it as a key for the subtarget since that can be the only difference
// between two functions.
bool SoftFloat =
- F.hasFnAttribute("use-soft-float") &&
F.getFnAttribute("use-soft-float").getValueAsString() == "true";
// If the soft float attribute is set on the function turn on the soft float
// subtarget feature.
if (SoftFloat)
- FS += FS.empty() ? "+soft-float" : ",+soft-float";
+ Key += FS.empty() ? "+soft-float" : ",+soft-float";
- auto &I = SubtargetMap[CPU + FS];
+ FS = Key.substr(CPU.size());
+
+ auto &I = SubtargetMap[Key];
if (!I) {
// This needs to be done before we create a new subtarget since any
// creation will depend on the TM and the code generation flags on the
@@ -234,7 +285,6 @@ bool X86PassConfig::addInstSelector() {
addPass(createCleanupLocalDynamicTLSPass());
addPass(createX86GlobalBaseRegPass());
-
return false;
}
@@ -254,10 +304,13 @@ bool X86PassConfig::addPreISel() {
}
void X86PassConfig::addPreRegAlloc() {
- if (getOptLevel() != CodeGenOpt::None)
+ if (getOptLevel() != CodeGenOpt::None) {
+ addPass(createX86FixupSetCC());
addPass(createX86OptimizeLEAs());
+ addPass(createX86CallFrameOptimization());
+ }
- addPass(createX86CallFrameOptimization());
+ addPass(createX86WinAllocaExpander());
}
void X86PassConfig::addPostRegAlloc() {
@@ -274,6 +327,7 @@ void X86PassConfig::addPreEmitPass() {
addPass(createX86IssueVZeroUpperPass());
if (getOptLevel() != CodeGenOpt::None) {
+ addPass(createX86FixupBWInsts());
addPass(createX86PadShortFunctions());
addPass(createX86FixupLEAs());
}
diff --git a/lib/Target/X86/X86TargetMachine.h b/lib/Target/X86/X86TargetMachine.h
index 262955698e441..4734a44315a95 100644
--- a/lib/Target/X86/X86TargetMachine.h
+++ b/lib/Target/X86/X86TargetMachine.h
@@ -30,8 +30,9 @@ class X86TargetMachine final : public LLVMTargetMachine {
public:
X86TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL);
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
~X86TargetMachine() override;
const X86Subtarget *getSubtargetImpl(const Function &F) const override;
diff --git a/lib/Target/X86/X86TargetObjectFile.cpp b/lib/Target/X86/X86TargetObjectFile.cpp
index 782768d0ab161..d664cff5f2c18 100644
--- a/lib/Target/X86/X86TargetObjectFile.cpp
+++ b/lib/Target/X86/X86TargetObjectFile.cpp
@@ -73,53 +73,30 @@ X86LinuxNaClTargetObjectFile::Initialize(MCContext &Ctx,
InitializeELF(TM.Options.UseInitArray);
}
-const MCExpr *X86WindowsTargetObjectFile::getExecutableRelativeSymbol(
- const ConstantExpr *CE, Mangler &Mang, const TargetMachine &TM) const {
- // We are looking for the difference of two symbols, need a subtraction
- // operation.
- const SubOperator *Sub = dyn_cast<SubOperator>(CE);
- if (!Sub)
- return nullptr;
-
- // Symbols must first be numbers before we can subtract them, we need to see a
- // ptrtoint on both subtraction operands.
- const PtrToIntOperator *SubLHS =
- dyn_cast<PtrToIntOperator>(Sub->getOperand(0));
- const PtrToIntOperator *SubRHS =
- dyn_cast<PtrToIntOperator>(Sub->getOperand(1));
- if (!SubLHS || !SubRHS)
- return nullptr;
-
+const MCExpr *X86WindowsTargetObjectFile::lowerRelativeReference(
+ const GlobalValue *LHS, const GlobalValue *RHS, Mangler &Mang,
+ const TargetMachine &TM) const {
// Our symbols should exist in address space zero, cowardly no-op if
// otherwise.
- if (SubLHS->getPointerAddressSpace() != 0 ||
- SubRHS->getPointerAddressSpace() != 0)
+ if (LHS->getType()->getPointerAddressSpace() != 0 ||
+ RHS->getType()->getPointerAddressSpace() != 0)
return nullptr;
// Both ptrtoint instructions must wrap global objects:
// - Only global variables are eligible for image relative relocations.
// - The subtrahend refers to the special symbol __ImageBase, a GlobalVariable.
- const auto *GOLHS = dyn_cast<GlobalObject>(SubLHS->getPointerOperand());
- const auto *GVRHS = dyn_cast<GlobalVariable>(SubRHS->getPointerOperand());
- if (!GOLHS || !GVRHS)
- return nullptr;
-
// We expect __ImageBase to be a global variable without a section, externally
// defined.
//
// It should look something like this: @__ImageBase = external constant i8
- if (GVRHS->isThreadLocal() || GVRHS->getName() != "__ImageBase" ||
- !GVRHS->hasExternalLinkage() || GVRHS->hasInitializer() ||
- GVRHS->hasSection())
- return nullptr;
-
- // An image-relative, thread-local, symbol makes no sense.
- if (GOLHS->isThreadLocal())
+ if (!isa<GlobalObject>(LHS) || !isa<GlobalVariable>(RHS) ||
+ LHS->isThreadLocal() || RHS->isThreadLocal() ||
+ RHS->getName() != "__ImageBase" || !RHS->hasExternalLinkage() ||
+ cast<GlobalVariable>(RHS)->hasInitializer() || RHS->hasSection())
return nullptr;
- return MCSymbolRefExpr::create(TM.getSymbol(GOLHS, Mang),
- MCSymbolRefExpr::VK_COFF_IMGREL32,
- getContext());
+ return MCSymbolRefExpr::create(
+ TM.getSymbol(LHS, Mang), MCSymbolRefExpr::VK_COFF_IMGREL32, getContext());
}
static std::string APIntToHexString(const APInt &AI) {
@@ -154,16 +131,34 @@ static std::string scalarConstantToHexString(const Constant *C) {
}
MCSection *X86WindowsTargetObjectFile::getSectionForConstant(
- const DataLayout &DL, SectionKind Kind, const Constant *C) const {
+ const DataLayout &DL, SectionKind Kind, const Constant *C,
+ unsigned &Align) const {
if (Kind.isMergeableConst() && C) {
const unsigned Characteristics = COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_LNK_COMDAT;
std::string COMDATSymName;
- if (Kind.isMergeableConst4() || Kind.isMergeableConst8())
- COMDATSymName = "__real@" + scalarConstantToHexString(C);
- else if (Kind.isMergeableConst16())
- COMDATSymName = "__xmm@" + scalarConstantToHexString(C);
+ if (Kind.isMergeableConst4()) {
+ if (Align <= 4) {
+ COMDATSymName = "__real@" + scalarConstantToHexString(C);
+ Align = 4;
+ }
+ } else if (Kind.isMergeableConst8()) {
+ if (Align <= 8) {
+ COMDATSymName = "__real@" + scalarConstantToHexString(C);
+ Align = 8;
+ }
+ } else if (Kind.isMergeableConst16()) {
+ if (Align <= 16) {
+ COMDATSymName = "__xmm@" + scalarConstantToHexString(C);
+ Align = 16;
+ }
+ } else if (Kind.isMergeableConst32()) {
+ if (Align <= 32) {
+ COMDATSymName = "__ymm@" + scalarConstantToHexString(C);
+ Align = 32;
+ }
+ }
if (!COMDATSymName.empty())
return getContext().getCOFFSection(".rdata", Characteristics, Kind,
@@ -171,5 +166,5 @@ MCSection *X86WindowsTargetObjectFile::getSectionForConstant(
COFF::IMAGE_COMDAT_SELECT_ANY);
}
- return TargetLoweringObjectFile::getSectionForConstant(DL, Kind, C);
+ return TargetLoweringObjectFile::getSectionForConstant(DL, Kind, C, Align);
}
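For orientation, a small standalone sketch of the constant-section choice made above; the helper name and plain types are illustrative, but the symbol prefixes and alignment thresholds mirror the code.

    #include <string>

    // Returns the COMDAT symbol name for a mergeable constant, or an empty
    // string when the constant should fall back to the generic constant pool.
    // Align is updated to the section alignment implied by the constant width.
    std::string comdatNameFor(unsigned SizeInBytes, unsigned &Align,
                              const std::string &HexDigits) {
      if ((SizeInBytes == 4 && Align <= 4) || (SizeInBytes == 8 && Align <= 8)) {
        Align = SizeInBytes;            // scalar FP constants: __real@<hex>
        return "__real@" + HexDigits;
      }
      if (SizeInBytes == 16 && Align <= 16) {
        Align = 16;                     // 128-bit vector constants: __xmm@<hex>
        return "__xmm@" + HexDigits;
      }
      if (SizeInBytes == 32 && Align <= 32) {
        Align = 32;                     // 256-bit vector constants: __ymm@<hex>
        return "__ymm@" + HexDigits;
      }
      return {};
    }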
diff --git a/lib/Target/X86/X86TargetObjectFile.h b/lib/Target/X86/X86TargetObjectFile.h
index 6b2448cc9de60..2e703f1494fa9 100644
--- a/lib/Target/X86/X86TargetObjectFile.h
+++ b/lib/Target/X86/X86TargetObjectFile.h
@@ -40,6 +40,11 @@ namespace llvm {
 /// \brief This implementation is used for X86 ELF targets that don't
/// have a further specialization.
class X86ELFTargetObjectFile : public TargetLoweringObjectFileELF {
+ public:
+ X86ELFTargetObjectFile() {
+ PLTRelativeVariantKind = MCSymbolRefExpr::VK_PLT;
+ }
+
/// \brief Describe a TLS variable address within debug info.
const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const override;
};
@@ -53,13 +58,15 @@ namespace llvm {
/// \brief This implementation is used for Windows targets on x86 and x86-64.
class X86WindowsTargetObjectFile : public TargetLoweringObjectFileCOFF {
const MCExpr *
- getExecutableRelativeSymbol(const ConstantExpr *CE, Mangler &Mang,
- const TargetMachine &TM) const override;
+ lowerRelativeReference(const GlobalValue *LHS, const GlobalValue *RHS,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
/// \brief Given a mergeable constant with the specified size and relocation
/// information, return a section that it should be placed in.
MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
- const Constant *C) const override;
+ const Constant *C,
+ unsigned &Align) const override;
};
} // end namespace llvm
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index 2e7bbb2087430..f44a8c6620289 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -532,21 +532,24 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
// potential massive combinations (elem_num x src_type x dst_type).
static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
- { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
{ ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
{ ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
+ { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
{ ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
- { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
- { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
- { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
{ ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
{ ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
{ ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
+ { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
+ { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
+ { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
};
+ // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
+ // 256-bit wide vectors.
+
static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
{ ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
{ ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
@@ -560,43 +563,46 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
// v16i1 -> v16i32 - load + broadcast
{ ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
{ ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
-
{ ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
{ ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
{ ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
- { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
- { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
{ ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
{ ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
+ { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
+ { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
+ { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
+ { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
+ { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
- { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
- { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
- { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
{ ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
{ ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
{ ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
+ { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
{ ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
- { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
{ ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
- { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
- { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
- { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
{ ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 },
{ ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 },
@@ -608,20 +614,20 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
};
static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
- { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
- { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
@@ -639,66 +645,69 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
};
static const TypeConversionCostTblEntry AVXConversionTbl[] = {
- { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
- { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
+ { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
+ { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
+ { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
{ ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
{ ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
{ ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 },
- { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
- { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
{ ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 },
- { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
- { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
- { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
- { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
{ ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
+ { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
+ { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
+ { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
+ { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
{ ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
// The generic code to compute the scalar overhead is currently broken.
 // Work around this limitation by estimating the scalarization overhead
// here. We have roughly 10 instructions per scalar element.
// Multiply that by the vector width.
// FIXME: remove that when PR19268 is fixed.
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
- { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 4*10 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 10 },
+ { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 20 },
+ { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
- { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
{ ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
+ { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
// This node is expanded into scalarized operations but BasicTTI is overly
// optimistic estimating its cost. It computes 3 per element (one
// vector-extract, one scalar conversion and one vector-insert). The
@@ -706,89 +715,104 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
// should be factored in too. Inflating the cost per element by 1.
{ ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
{ ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
+
+ { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
+ { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
};
static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
- { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
- { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
- { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
- { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
- { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
- { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
+
+ { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
{ ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
- { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
- { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
{ ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
- { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
- { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
+ { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
+ { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
+ { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
- { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
- { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 },
+ { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
{ ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 30 },
{ ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
- { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
- { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
- { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 },
+ { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
+ { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
+
};
static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
// These are somewhat magic numbers justified by looking at the output of
// Intel's IACA, running some kernels and making sure when we take
// legalization into account the throughput will be overestimated.
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
- { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
- { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
- { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
{ ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
- // There are faster sequences for float conversions.
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
{ ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
- { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
- { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
- { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
- { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
- { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
- { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
+
+ { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
{ ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
{ ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
- { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
- { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
{ ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
- { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
- { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
+ { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
+ { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
+ { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
+ { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
+ { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },
- { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
- { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 },
+ { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 },
+ { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
{ ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
{ ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
- { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
- { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 },
- { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 },
+ { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
+ { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
+ { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
};
std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
@@ -859,13 +883,17 @@ int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
+ static const CostTblEntry SSE2CostTbl[] = {
+ { ISD::SETCC, MVT::v2i64, 8 },
+ { ISD::SETCC, MVT::v4i32, 1 },
+ { ISD::SETCC, MVT::v8i16, 1 },
+ { ISD::SETCC, MVT::v16i8, 1 },
+ };
+
static const CostTblEntry SSE42CostTbl[] = {
{ ISD::SETCC, MVT::v2f64, 1 },
{ ISD::SETCC, MVT::v4f32, 1 },
{ ISD::SETCC, MVT::v2i64, 1 },
- { ISD::SETCC, MVT::v4i32, 1 },
- { ISD::SETCC, MVT::v8i16, 1 },
- { ISD::SETCC, MVT::v16i8, 1 },
};
static const CostTblEntry AVX1CostTbl[] = {
@@ -908,12 +936,112 @@ int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
return LT.first * Entry->Cost;
+ if (ST->hasSSE2())
+ if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
+int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> Tys, FastMathFlags FMF) {
+ static const CostTblEntry XOPCostTbl[] = {
+ { ISD::BITREVERSE, MVT::v4i64, 4 },
+ { ISD::BITREVERSE, MVT::v8i32, 4 },
+ { ISD::BITREVERSE, MVT::v16i16, 4 },
+ { ISD::BITREVERSE, MVT::v32i8, 4 },
+ { ISD::BITREVERSE, MVT::v2i64, 1 },
+ { ISD::BITREVERSE, MVT::v4i32, 1 },
+ { ISD::BITREVERSE, MVT::v8i16, 1 },
+ { ISD::BITREVERSE, MVT::v16i8, 1 },
+ { ISD::BITREVERSE, MVT::i64, 3 },
+ { ISD::BITREVERSE, MVT::i32, 3 },
+ { ISD::BITREVERSE, MVT::i16, 3 },
+ { ISD::BITREVERSE, MVT::i8, 3 }
+ };
+ static const CostTblEntry AVX2CostTbl[] = {
+ { ISD::BITREVERSE, MVT::v4i64, 5 },
+ { ISD::BITREVERSE, MVT::v8i32, 5 },
+ { ISD::BITREVERSE, MVT::v16i16, 5 },
+ { ISD::BITREVERSE, MVT::v32i8, 5 },
+ { ISD::BSWAP, MVT::v4i64, 1 },
+ { ISD::BSWAP, MVT::v8i32, 1 },
+ { ISD::BSWAP, MVT::v16i16, 1 }
+ };
+ static const CostTblEntry AVX1CostTbl[] = {
+ { ISD::BITREVERSE, MVT::v4i64, 10 },
+ { ISD::BITREVERSE, MVT::v8i32, 10 },
+ { ISD::BITREVERSE, MVT::v16i16, 10 },
+ { ISD::BITREVERSE, MVT::v32i8, 10 },
+ { ISD::BSWAP, MVT::v4i64, 4 },
+ { ISD::BSWAP, MVT::v8i32, 4 },
+ { ISD::BSWAP, MVT::v16i16, 4 }
+ };
+ static const CostTblEntry SSSE3CostTbl[] = {
+ { ISD::BITREVERSE, MVT::v2i64, 5 },
+ { ISD::BITREVERSE, MVT::v4i32, 5 },
+ { ISD::BITREVERSE, MVT::v8i16, 5 },
+ { ISD::BITREVERSE, MVT::v16i8, 5 },
+ { ISD::BSWAP, MVT::v2i64, 1 },
+ { ISD::BSWAP, MVT::v4i32, 1 },
+ { ISD::BSWAP, MVT::v8i16, 1 }
+ };
+ static const CostTblEntry SSE2CostTbl[] = {
+ { ISD::BSWAP, MVT::v2i64, 7 },
+ { ISD::BSWAP, MVT::v4i32, 7 },
+ { ISD::BSWAP, MVT::v8i16, 7 }
+ };
+
+ unsigned ISD = ISD::DELETED_NODE;
+ switch (IID) {
+ default:
+ break;
+ case Intrinsic::bitreverse:
+ ISD = ISD::BITREVERSE;
+ break;
+ case Intrinsic::bswap:
+ ISD = ISD::BSWAP;
+ break;
+ }
+
+ // Legalize the type.
+ std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
+ MVT MTy = LT.second;
+
+ // Attempt to lookup cost.
+ if (ST->hasXOP())
+ if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ if (ST->hasAVX2())
+ if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ if (ST->hasAVX())
+ if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ if (ST->hasSSSE3())
+ if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ if (ST->hasSSE2())
+ if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
+}
+
+int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Value *> Args, FastMathFlags FMF) {
+ return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF);
+}
+
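A small sketch of how these tables are consumed: the matching entry's cost is scaled by the number of legalized pieces (LT.first). The helper below is illustrative only; the example values come from the AVX2 and SSSE3 tables above and assume a v8i32 splits into two v4i32 halves on a 128-bit-only subtarget.

    #include <cassert>

    static int intrinsicCost(int NumLegalizedParts, int TableCost) {
      return NumLegalizedParts * TableCost;
    }

    int main() {
      assert(intrinsicCost(1, 1) == 1); // bswap <8 x i32> with AVX2 (legal type)
      assert(intrinsicCost(2, 1) == 2); // bswap <8 x i32> with SSSE3 only (split)
      assert(intrinsicCost(1, 5) == 5); // bitreverse <2 x i64> with SSSE3 only
    }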
int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
assert(Val->isVectorTy() && "This must be a vector type");
+ Type *ScalarType = Val->getScalarType();
+
if (Index != -1U) {
// Legalize the type.
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
@@ -927,11 +1055,17 @@ int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
Index = Index % Width;
// Floating point scalars are already located in index #0.
- if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
+ if (ScalarType->isFloatingPointTy() && Index == 0)
return 0;
}
- return BaseT::getVectorInstrCost(Opcode, Val, Index);
+ // Add to the base cost if we know that the extracted element of a vector is
+ // destined to be moved to and used in the integer register file.
+ int RegisterFileMoveCost = 0;
+ if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
+ RegisterFileMoveCost = 1;
+
+ return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}
int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
@@ -983,10 +1117,10 @@ int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
// Each load/store unit costs 1.
int Cost = LT.first * 1;
- // On Sandybridge 256bit load/stores are double pumped
- // (but not on Haswell).
- if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
- Cost*=2;
+ // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
+ // proxy for a double-pumped AVX memory interface such as on Sandybridge.
+ if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
+ Cost *= 2;
return Cost;
}
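A tiny illustrative sketch of the reshaped heuristic (plain types, not the TTI interface): the legalized cost is doubled only for 32-byte accesses on subtargets where unaligned 32-byte memory operations are slow, the Sandybridge-style proxy described in the comment above.

    static int memoryOpCost(int NumLegalizedParts, unsigned StoreSizeBytes,
                            bool UnalignedMem32Slow) {
      int Cost = NumLegalizedParts;      // each load/store unit costs 1
      if (StoreSizeBytes == 32 && UnalignedMem32Slow)
        Cost *= 2;
      return Cost;
    }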
@@ -1001,14 +1135,14 @@ int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
unsigned NumElem = SrcVTy->getVectorNumElements();
VectorType *MaskTy =
- VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
+ VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
(Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
!isPowerOf2_32(NumElem)) {
// Scalarization
int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
int ScalarCompareCost = getCmpSelInstrCost(
- Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), nullptr);
+ Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
int BranchCost = getCFInstrCost(Instruction::Br);
int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
@@ -1171,7 +1305,7 @@ int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
int64_t Val = Tmp.getSExtValue();
Cost += getIntImmCost(Val);
}
- // We need at least one instruction to materialze the constant.
+ // We need at least one instruction to materialize the constant.
return std::max(1, Cost);
}
@@ -1314,7 +1448,7 @@ int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
if (IndexSize < 64 || !GEP)
return IndexSize;
-
+
unsigned NumOfVarIndices = 0;
Value *Ptrs = GEP->getPointerOperand();
if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
@@ -1339,7 +1473,7 @@ int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL) :
DL.getPointerSizeInBits();
- Type *IndexVTy = VectorType::get(IntegerType::get(getGlobalContext(),
+ Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
IndexSize), VF);
std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
@@ -1374,10 +1508,10 @@ int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
int MaskUnpackCost = 0;
if (VariableMask) {
VectorType *MaskTy =
- VectorType::get(Type::getInt1Ty(getGlobalContext()), VF);
+ VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
int ScalarCompareCost =
- getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(getGlobalContext()),
+ getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()),
nullptr);
int BranchCost = getCFInstrCost(Instruction::Br);
MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
@@ -1438,7 +1572,8 @@ bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
int DataWidth = isa<PointerType>(ScalarTy) ?
DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();
- return (DataWidth >= 32 && ST->hasAVX2());
+ return (DataWidth >= 32 && ST->hasAVX()) ||
+ (DataWidth >= 8 && ST->hasBWI());
}
bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
diff --git a/lib/Target/X86/X86TargetTransformInfo.h b/lib/Target/X86/X86TargetTransformInfo.h
index adb745e912d1c..ab8046bb9fd46 100644
--- a/lib/Target/X86/X86TargetTransformInfo.h
+++ b/lib/Target/X86/X86TargetTransformInfo.h
@@ -80,6 +80,11 @@ public:
bool VariableMask, unsigned Alignment);
int getAddressComputationCost(Type *PtrTy, bool IsComplex);
+ int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> Tys, FastMathFlags FMF);
+ int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Value *> Args, FastMathFlags FMF);
+
int getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm);
int getIntImmCost(int64_t);
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index 6925b272b4a5b..9320e1e2226fb 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -38,6 +38,10 @@ namespace {
VZeroUpperInserter() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
const char *getPassName() const override {return "X86 vzeroupper inserter";}
private:
@@ -80,6 +84,7 @@ namespace {
BlockStateMap BlockStates;
DirtySuccessorsWorkList DirtySuccessors;
bool EverMadeChange;
+ bool IsX86INTR;
const TargetInstrInfo *TII;
static char ID;
@@ -122,10 +127,9 @@ static bool clobbersAllYmmRegs(const MachineOperand &MO) {
return true;
}
-static bool hasYmmReg(MachineInstr *MI) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (MI->isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
+static bool hasYmmReg(MachineInstr &MI) {
+ for (const MachineOperand &MO : MI.operands()) {
+ if (MI.isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
return true;
if (!MO.isReg())
continue;
@@ -137,12 +141,10 @@ static bool hasYmmReg(MachineInstr *MI) {
return false;
}
-/// clobbersAnyYmmReg() - Check if any YMM register will be clobbered by this
-/// instruction.
-static bool callClobbersAnyYmmReg(MachineInstr *MI) {
- assert(MI->isCall() && "Can only be called on call instructions.");
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+/// Check if any YMM register will be clobbered by this instruction.
+static bool callClobbersAnyYmmReg(MachineInstr &MI) {
+ assert(MI.isCall() && "Can only be called on call instructions.");
+ for (const MachineOperand &MO : MI.operands()) {
if (!MO.isRegMask())
continue;
for (unsigned reg = X86::YMM0; reg <= X86::YMM15; ++reg) {
@@ -153,16 +155,16 @@ static bool callClobbersAnyYmmReg(MachineInstr *MI) {
return false;
}
-// Insert a vzeroupper instruction before I.
+/// Insert a vzeroupper instruction before I.
void VZeroUpperInserter::insertVZeroUpper(MachineBasicBlock::iterator I,
- MachineBasicBlock &MBB) {
+ MachineBasicBlock &MBB) {
DebugLoc dl = I->getDebugLoc();
BuildMI(MBB, I, dl, TII->get(X86::VZEROUPPER));
++NumVZU;
EverMadeChange = true;
}
-// Add MBB to the DirtySuccessors list if it hasn't already been added.
+/// Add MBB to the DirtySuccessors list if it hasn't already been added.
void VZeroUpperInserter::addDirtySuccessor(MachineBasicBlock &MBB) {
if (!BlockStates[MBB.getNumber()].AddedToDirtySuccessors) {
DirtySuccessors.push_back(&MBB);
@@ -170,21 +172,29 @@ void VZeroUpperInserter::addDirtySuccessor(MachineBasicBlock &MBB) {
}
}
-/// processBasicBlock - Loop over all of the instructions in the basic block,
-/// inserting vzeroupper instructions before function calls.
+/// Loop over all of the instructions in the basic block, inserting vzeroupper
+/// instructions before function calls.
void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
- // Start by assuming that the block PASS_THROUGH, which implies no unguarded
+ // Start by assuming that the block is PASS_THROUGH, which implies no unguarded
// calls.
BlockExitState CurState = PASS_THROUGH;
BlockStates[MBB.getNumber()].FirstUnguardedCall = MBB.end();
- for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
- MachineInstr *MI = I;
- bool isControlFlow = MI->isCall() || MI->isReturn();
+ for (MachineInstr &MI : MBB) {
+ // No need for vzeroupper before iret in interrupt handler function,
+ // epilogue will restore YMM registers if needed.
+ bool IsReturnFromX86INTR = IsX86INTR && MI.isReturn();
+ bool IsControlFlow = MI.isCall() || MI.isReturn();
+
+ // An existing VZERO* instruction resets the state.
+ if (MI.getOpcode() == X86::VZEROALL || MI.getOpcode() == X86::VZEROUPPER) {
+ CurState = EXITS_CLEAN;
+ continue;
+ }
// Shortcut: don't need to check regular instructions in dirty state.
- if (!isControlFlow && CurState == EXITS_DIRTY)
+ if ((!IsControlFlow || IsReturnFromX86INTR) && CurState == EXITS_DIRTY)
continue;
if (hasYmmReg(MI)) {
@@ -196,7 +206,7 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// Check for control-flow out of the current function (which might
// indirectly execute SSE instructions).
- if (!isControlFlow)
+ if (!IsControlFlow || IsReturnFromX86INTR)
continue;
 // If the call won't clobber any YMM register, skip it as well. It usually
 // happens on helper function calls, where the
@@ -204,22 +214,21 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// standard calling convention is not used (RegMask is not used to mark
// register clobbered and register usage (def/imp-def/use) is well-defined
// and explicitly specified.
- if (MI->isCall() && !callClobbersAnyYmmReg(MI))
+ if (MI.isCall() && !callClobbersAnyYmmReg(MI))
continue;
- // The VZEROUPPER instruction resets the upper 128 bits of all Intel AVX
- // registers. This instruction has zero latency. In addition, the processor
- // changes back to Clean state, after which execution of Intel SSE
- // instructions or Intel AVX instructions has no transition penalty. Add
- // the VZEROUPPER instruction before any function call/return that might
- // execute SSE code.
+ // The VZEROUPPER instruction resets the upper 128 bits of all AVX
+ // registers. In addition, the processor changes back to Clean state, after
+ // which execution of SSE instructions or AVX instructions has no transition
+ // penalty. Add the VZEROUPPER instruction before any function call/return
+ // that might execute SSE code.
// FIXME: In some cases, we may want to move the VZEROUPPER into a
// predecessor block.
if (CurState == EXITS_DIRTY) {
// After the inserted VZEROUPPER the state becomes clean again, but
// other YMM may appear before other subsequent calls or even before
// the end of the BB.
- insertVZeroUpper(I, MBB);
+ insertVZeroUpper(MI, MBB);
CurState = EXITS_CLEAN;
} else if (CurState == PASS_THROUGH) {
// If this block is currently in pass-through state and we encounter a
@@ -227,7 +236,7 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// block has successors that exit dirty. Record the location of the call,
// and set the state to EXITS_CLEAN, but do not insert the vzeroupper yet.
// It will be inserted later if necessary.
- BlockStates[MBB.getNumber()].FirstUnguardedCall = I;
+ BlockStates[MBB.getNumber()].FirstUnguardedCall = MI;
CurState = EXITS_CLEAN;
}
}
@@ -244,15 +253,16 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
BlockStates[MBB.getNumber()].ExitState = CurState;
}
-/// runOnMachineFunction - Loop over all of the basic blocks, inserting
-/// vzeroupper instructions before function calls.
+/// Loop over all of the basic blocks, inserting vzeroupper instructions before
+/// function calls.
bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
- if (!ST.hasAVX() || ST.hasAVX512())
+ if (!ST.hasAVX() || ST.hasAVX512() || ST.hasFastPartialYMMWrite())
return false;
TII = ST.getInstrInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
EverMadeChange = false;
+ IsX86INTR = MF.getFunction()->getCallingConv() == CallingConv::X86_INTR;
bool FnHasLiveInYmm = checkFnHasLiveInYmm(MRI);
@@ -284,12 +294,12 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock &MBB : MF)
processBasicBlock(MBB);
- // If any YMM regs are live in to this function, add the entry block to the
+ // If any YMM regs are live-in to this function, add the entry block to the
// DirtySuccessors list
if (FnHasLiveInYmm)
addDirtySuccessor(MF.front());
- // Re-visit all blocks that are successors of EXITS_DIRTY bsocks. Add
+ // Re-visit all blocks that are successors of EXITS_DIRTY blocks. Add
// vzeroupper instructions to unguarded calls, and propagate EXITS_DIRTY
// through PASS_THROUGH blocks.
while (!DirtySuccessors.empty()) {
@@ -302,16 +312,14 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
if (BBState.FirstUnguardedCall != MBB.end())
insertVZeroUpper(BBState.FirstUnguardedCall, MBB);
- // If this successor was a pass-through block then it is now dirty, and its
+ // If this successor was a pass-through block, then it is now dirty. Its
// successors need to be added to the worklist (if they haven't been
// already).
if (BBState.ExitState == PASS_THROUGH) {
DEBUG(dbgs() << "MBB #" << MBB.getNumber()
<< " was Pass-through, is now Dirty-out.\n");
- for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
- SE = MBB.succ_end();
- SI != SE; ++SI)
- addDirtySuccessor(**SI);
+ for (MachineBasicBlock *Succ : MBB.successors())
+ addDirtySuccessor(*Succ);
}
}
diff --git a/lib/Target/X86/X86WinAllocaExpander.cpp b/lib/Target/X86/X86WinAllocaExpander.cpp
new file mode 100644
index 0000000000000..cc82074e685f8
--- /dev/null
+++ b/lib/Target/X86/X86WinAllocaExpander.cpp
@@ -0,0 +1,294 @@
+//===----- X86WinAllocaExpander.cpp - Expand WinAlloca pseudo instruction -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass that expands WinAlloca pseudo-instructions.
+//
+// It performs a conservative analysis to determine whether each allocation
+// falls within a region of the stack that is safe to use, or whether stack
+// probes must be emitted.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+namespace {
+
+class X86WinAllocaExpander : public MachineFunctionPass {
+public:
+ X86WinAllocaExpander() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+ /// Strategies for lowering a WinAlloca.
+ enum Lowering { TouchAndSub, Sub, Probe };
+
+ /// Deterministic-order map from WinAlloca instruction to desired lowering.
+ typedef MapVector<MachineInstr*, Lowering> LoweringMap;
+
+ /// Compute which lowering to use for each WinAlloca instruction.
+ void computeLowerings(MachineFunction &MF, LoweringMap& Lowerings);
+
+ /// Get the appropriate lowering based on current offset and amount.
+ Lowering getLowering(int64_t CurrentOffset, int64_t AllocaAmount);
+
+ /// Lower a WinAlloca instruction.
+ void lower(MachineInstr* MI, Lowering L);
+
+ MachineRegisterInfo *MRI;
+ const X86Subtarget *STI;
+ const TargetInstrInfo *TII;
+ const X86RegisterInfo *TRI;
+ unsigned StackPtr;
+ unsigned SlotSize;
+ int64_t StackProbeSize;
+
+ const char *getPassName() const override { return "X86 WinAlloca Expander"; }
+ static char ID;
+};
+
+char X86WinAllocaExpander::ID = 0;
+
+} // end anonymous namespace
+
+FunctionPass *llvm::createX86WinAllocaExpander() {
+ return new X86WinAllocaExpander();
+}
+
+/// Return the allocation amount for a WinAlloca instruction, or -1 if unknown.
+static int64_t getWinAllocaAmount(MachineInstr *MI, MachineRegisterInfo *MRI) {
+ assert(MI->getOpcode() == X86::WIN_ALLOCA_32 ||
+ MI->getOpcode() == X86::WIN_ALLOCA_64);
+ assert(MI->getOperand(0).isReg());
+
+ unsigned AmountReg = MI->getOperand(0).getReg();
+ MachineInstr *Def = MRI->getUniqueVRegDef(AmountReg);
+
+ // Look through copies.
+ while (Def && Def->isCopy() && Def->getOperand(1).isReg())
+ Def = MRI->getUniqueVRegDef(Def->getOperand(1).getReg());
+
+ if (!Def ||
+ (Def->getOpcode() != X86::MOV32ri && Def->getOpcode() != X86::MOV64ri) ||
+ !Def->getOperand(1).isImm())
+ return -1;
+
+ return Def->getOperand(1).getImm();
+}
+
+X86WinAllocaExpander::Lowering
+X86WinAllocaExpander::getLowering(int64_t CurrentOffset,
+ int64_t AllocaAmount) {
+ // For a non-constant amount or a large amount, we have to probe.
+ if (AllocaAmount < 0 || AllocaAmount > StackProbeSize)
+ return Probe;
+
+ // If it fits within the safe region of the stack, just subtract.
+ if (CurrentOffset + AllocaAmount <= StackProbeSize)
+ return Sub;
+
+ // Otherwise, touch the current tip of the stack, then subtract.
+ return TouchAndSub;
+}
+
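A standalone sketch of the thresholds above, together with the Offset bookkeeping that computeLowerings() applies after each WinAlloca; the default 4096-byte probe size and the sample amounts are illustrative.

    #include <cassert>
    #include <cstdint>

    enum Lowering { TouchAndSub, Sub, Probe };

    // Mirror of getLowering(): probe for unknown/large amounts, plain subtract
    // while still inside the safe region, otherwise touch the tip first.
    static Lowering lowering(int64_t Offset, int64_t Amount,
                             int64_t ProbeSize = 4096) {
      if (Amount < 0 || Amount > ProbeSize)
        return Probe;
      if (Offset + Amount <= ProbeSize)
        return Sub;
      return TouchAndSub;
    }

    int main() {
      int64_t Offset = 0;                               // e.g. right after a call
      assert(lowering(Offset, 512) == Sub);             Offset += 512;
      assert(lowering(Offset, 4000) == TouchAndSub);    Offset = 4000;
      assert(lowering(Offset, 8192) == Probe);          Offset = 0;
      (void)Offset;
    }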
+static bool isPushPop(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case X86::PUSH32i8:
+ case X86::PUSH32r:
+ case X86::PUSH32rmm:
+ case X86::PUSH32rmr:
+ case X86::PUSHi32:
+ case X86::PUSH64i8:
+ case X86::PUSH64r:
+ case X86::PUSH64rmm:
+ case X86::PUSH64rmr:
+ case X86::PUSH64i32:
+ case X86::POP32r:
+ case X86::POP64r:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void X86WinAllocaExpander::computeLowerings(MachineFunction &MF,
+ LoweringMap &Lowerings) {
+ // Do a one-pass reverse post-order walk of the CFG to conservatively estimate
+ // the offset between the stack pointer and the lowest touched part of the
+ // stack, and use that to decide how to lower each WinAlloca instruction.
+
+ // Initialize OutOffset[B], the stack offset at exit from B, to something big.
+ DenseMap<MachineBasicBlock *, int64_t> OutOffset;
+ for (MachineBasicBlock &MBB : MF)
+ OutOffset[&MBB] = INT32_MAX;
+
+ // Note: we don't know the offset at the start of the entry block since the
+ // prologue hasn't been inserted yet, and how much that will adjust the stack
+ // pointer depends on register spills, which have not been computed yet.
+
+ // Compute the reverse post-order.
+ ReversePostOrderTraversal<MachineFunction*> RPO(&MF);
+
+ for (MachineBasicBlock *MBB : RPO) {
+ int64_t Offset = -1;
+ for (MachineBasicBlock *Pred : MBB->predecessors())
+ Offset = std::max(Offset, OutOffset[Pred]);
+ if (Offset == -1) Offset = INT32_MAX;
+
+ for (MachineInstr &MI : *MBB) {
+ if (MI.getOpcode() == X86::WIN_ALLOCA_32 ||
+ MI.getOpcode() == X86::WIN_ALLOCA_64) {
+ // A WinAlloca moves StackPtr, and potentially touches it.
+ int64_t Amount = getWinAllocaAmount(&MI, MRI);
+ Lowering L = getLowering(Offset, Amount);
+ Lowerings[&MI] = L;
+ switch (L) {
+ case Sub:
+ Offset += Amount;
+ break;
+ case TouchAndSub:
+ Offset = Amount;
+ break;
+ case Probe:
+ Offset = 0;
+ break;
+ }
+ } else if (MI.isCall() || isPushPop(MI)) {
+ // Calls, pushes and pops touch the tip of the stack.
+ Offset = 0;
+ } else if (MI.getOpcode() == X86::ADJCALLSTACKUP32 ||
+ MI.getOpcode() == X86::ADJCALLSTACKUP64) {
+ Offset -= MI.getOperand(0).getImm();
+ } else if (MI.getOpcode() == X86::ADJCALLSTACKDOWN32 ||
+ MI.getOpcode() == X86::ADJCALLSTACKDOWN64) {
+ Offset += MI.getOperand(0).getImm();
+ } else if (MI.modifiesRegister(StackPtr, TRI)) {
+ // Any other modification of SP means we've lost track of it.
+ Offset = INT32_MAX;
+ }
+ }
+
+ OutOffset[MBB] = Offset;
+ }
+}
+
+static unsigned getSubOpcode(bool Is64Bit, int64_t Amount) {
+ if (Is64Bit)
+ return isInt<8>(Amount) ? X86::SUB64ri8 : X86::SUB64ri32;
+ return isInt<8>(Amount) ? X86::SUB32ri8 : X86::SUB32ri;
+}
+
+void X86WinAllocaExpander::lower(MachineInstr* MI, Lowering L) {
+ DebugLoc DL = MI->getDebugLoc();
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineBasicBlock::iterator I = *MI;
+
+ int64_t Amount = getWinAllocaAmount(MI, MRI);
+ if (Amount == 0) {
+ MI->eraseFromParent();
+ return;
+ }
+
+ bool Is64Bit = STI->is64Bit();
+ assert(SlotSize == 4 || SlotSize == 8);
+ unsigned RegA = (SlotSize == 8) ? X86::RAX : X86::EAX;
+
+ switch (L) {
+ case TouchAndSub:
+ assert(Amount >= SlotSize);
+
+ // Use a push to touch the top of the stack.
+ BuildMI(*MBB, I, DL, TII->get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(RegA, RegState::Undef);
+ Amount -= SlotSize;
+ if (!Amount)
+ break;
+
+ // Fall through to make any remaining adjustment.
+ case Sub:
+ assert(Amount > 0);
+ if (Amount == SlotSize) {
+ // Use push to save size.
+ BuildMI(*MBB, I, DL, TII->get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(RegA, RegState::Undef);
+ } else {
+ // Sub.
+ BuildMI(*MBB, I, DL, TII->get(getSubOpcode(Is64Bit, Amount)), StackPtr)
+ .addReg(StackPtr)
+ .addImm(Amount);
+ }
+ break;
+ case Probe:
+ // The probe lowering expects the amount in RAX/EAX.
+ BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY), RegA)
+ .addReg(MI->getOperand(0).getReg());
+
+ // Do the probe.
+ STI->getFrameLowering()->emitStackProbe(*MBB->getParent(), *MBB, MI, DL,
+ /*InPrologue=*/false);
+ break;
+ }
+
+ unsigned AmountReg = MI->getOperand(0).getReg();
+ MI->eraseFromParent();
+
+ // Delete the definition of AmountReg, possibly walking a chain of copies.
+ for (;;) {
+ if (!MRI->use_empty(AmountReg))
+ break;
+ MachineInstr *AmountDef = MRI->getUniqueVRegDef(AmountReg);
+ if (!AmountDef)
+ break;
+ if (AmountDef->isCopy() && AmountDef->getOperand(1).isReg())
+ AmountReg = AmountDef->getOperand(1).getReg();
+ AmountDef->eraseFromParent();
+ break;
+ }
+}
+
+bool X86WinAllocaExpander::runOnMachineFunction(MachineFunction &MF) {
+ if (!MF.getInfo<X86MachineFunctionInfo>()->hasWinAlloca())
+ return false;
+
+ MRI = &MF.getRegInfo();
+ STI = &MF.getSubtarget<X86Subtarget>();
+ TII = STI->getInstrInfo();
+ TRI = STI->getRegisterInfo();
+ StackPtr = TRI->getStackRegister();
+ SlotSize = TRI->getSlotSize();
+
+ StackProbeSize = 4096;
+ if (MF.getFunction()->hasFnAttribute("stack-probe-size")) {
+ MF.getFunction()
+ ->getFnAttribute("stack-probe-size")
+ .getValueAsString()
+ .getAsInteger(0, StackProbeSize);
+ }
+
+ LoweringMap Lowerings;
+ computeLowerings(MF, Lowerings);
+ for (auto &P : Lowerings)
+ lower(P.first, P.second);
+
+ return true;
+}
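For completeness, a sketch of how a frontend can raise the probe threshold that this pass reads; it assumes the string-valued Function::addFnAttr overload, and the 8192 value is only an example.

    #include "llvm/IR/Function.h"

    static void requestLargerStackProbes(llvm::Function &F) {
      // Parsed by the expander via getAsInteger(0, StackProbeSize).
      F.addFnAttr("stack-probe-size", "8192");
    }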
diff --git a/lib/Target/X86/X86WinEHState.cpp b/lib/Target/X86/X86WinEHState.cpp
index dce94a9e9ef78..99387edef99a9 100644
--- a/lib/Target/X86/X86WinEHState.cpp
+++ b/lib/Target/X86/X86WinEHState.cpp
@@ -15,33 +15,32 @@
//===----------------------------------------------------------------------===//
#include "X86.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
-#include "llvm/IR/Dominators.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
-#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Cloning.h"
-#include "llvm/Transforms/Utils/Local.h"
+#include <deque>
using namespace llvm;
-using namespace llvm::PatternMatch;
#define DEBUG_TYPE "winehstate"
-namespace llvm { void initializeWinEHStatePassPass(PassRegistry &); }
+namespace llvm {
+void initializeWinEHStatePassPass(PassRegistry &);
+}
namespace {
+const int OverdefinedState = INT_MIN;
+
class WinEHStatePass : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid.
@@ -68,12 +67,20 @@ private:
void linkExceptionRegistration(IRBuilder<> &Builder, Function *Handler);
void unlinkExceptionRegistration(IRBuilder<> &Builder);
void addStateStores(Function &F, WinEHFuncInfo &FuncInfo);
- void insertStateNumberStore(Value *ParentRegNode, Instruction *IP, int State);
+ void insertStateNumberStore(Instruction *IP, int State);
Value *emitEHLSDA(IRBuilder<> &Builder, Function *F);
Function *generateLSDAInEAXThunk(Function *ParentFunc);
+ bool isStateStoreNeeded(EHPersonality Personality, CallSite CS);
+ void rewriteSetJmpCallSite(IRBuilder<> &Builder, Function &F, CallSite CS,
+ Value *State);
+ int getBaseStateForBB(DenseMap<BasicBlock *, ColorVector> &BlockColors,
+ WinEHFuncInfo &FuncInfo, BasicBlock *BB);
+ int getStateForCallSite(DenseMap<BasicBlock *, ColorVector> &BlockColors,
+ WinEHFuncInfo &FuncInfo, CallSite CS);
+
// Module-level type getters.
Type *getEHLinkRegistrationType();
Type *getSEHRegistrationType();
@@ -84,20 +91,23 @@ private:
StructType *EHLinkRegistrationTy = nullptr;
StructType *CXXEHRegistrationTy = nullptr;
StructType *SEHRegistrationTy = nullptr;
- Function *FrameRecover = nullptr;
- Function *FrameAddress = nullptr;
- Function *FrameEscape = nullptr;
+ Constant *SetJmp3 = nullptr;
+ Constant *CxxLongjmpUnwind = nullptr;
// Per-function state
EHPersonality Personality = EHPersonality::Unknown;
Function *PersonalityFn = nullptr;
+ bool UseStackGuard = false;
+ int ParentBaseState;
+ Constant *SehLongjmpUnwind = nullptr;
+ Constant *Cookie = nullptr;
/// The stack allocation containing all EH data, including the link in the
/// fs:00 chain and the current state.
AllocaInst *RegNode = nullptr;
- /// Struct type of RegNode. Used for GEPing.
- Type *RegNodeTy = nullptr;
+ // The allocation containing the EH security guard.
+ AllocaInst *EHGuardNode = nullptr;
/// The index of the state field of RegNode.
int StateFieldIndex = ~0U;
@@ -116,9 +126,6 @@ INITIALIZE_PASS(WinEHStatePass, "x86-winehstate",
bool WinEHStatePass::doInitialization(Module &M) {
TheModule = &M;
- FrameEscape = Intrinsic::getDeclaration(TheModule, Intrinsic::localescape);
- FrameRecover = Intrinsic::getDeclaration(TheModule, Intrinsic::localrecover);
- FrameAddress = Intrinsic::getDeclaration(TheModule, Intrinsic::frameaddress);
return false;
}
@@ -128,9 +135,10 @@ bool WinEHStatePass::doFinalization(Module &M) {
EHLinkRegistrationTy = nullptr;
CXXEHRegistrationTy = nullptr;
SEHRegistrationTy = nullptr;
- FrameEscape = nullptr;
- FrameRecover = nullptr;
- FrameAddress = nullptr;
+ SetJmp3 = nullptr;
+ CxxLongjmpUnwind = nullptr;
+ SehLongjmpUnwind = nullptr;
+ Cookie = nullptr;
return false;
}
@@ -164,6 +172,13 @@ bool WinEHStatePass::runOnFunction(Function &F) {
if (!HasPads)
return false;
+ Type *Int8PtrType = Type::getInt8PtrTy(TheModule->getContext());
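+  // _setjmp3's CRT prototype is, roughly:
+  //   int __cdecl _setjmp3(jmp_buf Buf, int Count, ...);
+  // where Count is the number of personality-specific arguments that follow.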
+ SetJmp3 = TheModule->getOrInsertFunction(
+ "_setjmp3", FunctionType::get(
+ Type::getInt32Ty(TheModule->getContext()),
+ {Int8PtrType, Type::getInt32Ty(TheModule->getContext())},
+ /*isVarArg=*/true));
+
// Disable frame pointer elimination in this function.
// FIXME: Do the nested handlers need to keep the parent ebp in ebp, or can we
// use an arbitrary register?
@@ -182,6 +197,10 @@ bool WinEHStatePass::runOnFunction(Function &F) {
// Reset per-function state.
PersonalityFn = nullptr;
Personality = EHPersonality::Unknown;
+ UseStackGuard = false;
+ RegNode = nullptr;
+ EHGuardNode = nullptr;
+
return true;
}
@@ -256,9 +275,14 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
assert(Personality == EHPersonality::MSVC_CXX ||
Personality == EHPersonality::MSVC_X86SEH);
- StringRef PersonalityName = PersonalityFn->getName();
+ // Struct type of RegNode. Used for GEPing.
+ Type *RegNodeTy;
+
IRBuilder<> Builder(&F->getEntryBlock(), F->getEntryBlock().begin());
Type *Int8PtrType = Builder.getInt8PtrTy();
+ Type *Int32Ty = Builder.getInt32Ty();
+ Type *VoidTy = Builder.getVoidTy();
+
if (Personality == EHPersonality::MSVC_CXX) {
RegNodeTy = getCXXEHRegistrationType();
RegNode = Builder.CreateAlloca(RegNodeTy);
@@ -268,42 +292,71 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
Builder.CreateStore(SP, Builder.CreateStructGEP(RegNodeTy, RegNode, 0));
// TryLevel = -1
StateFieldIndex = 2;
- insertStateNumberStore(RegNode, &*Builder.GetInsertPoint(), -1);
+ ParentBaseState = -1;
+ insertStateNumberStore(&*Builder.GetInsertPoint(), ParentBaseState);
// Handler = __ehhandler$F
Function *Trampoline = generateLSDAInEAXThunk(F);
Link = Builder.CreateStructGEP(RegNodeTy, RegNode, 1);
linkExceptionRegistration(Builder, Trampoline);
+
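+    // __CxxLongjmpUnwind is declared, roughly, as:
+    //   void __stdcall __CxxLongjmpUnwind(void *EHRegistrationNode);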
+ CxxLongjmpUnwind = TheModule->getOrInsertFunction(
+ "__CxxLongjmpUnwind",
+ FunctionType::get(VoidTy, Int8PtrType, /*isVarArg=*/false));
+ cast<Function>(CxxLongjmpUnwind->stripPointerCasts())
+ ->setCallingConv(CallingConv::X86_StdCall);
} else if (Personality == EHPersonality::MSVC_X86SEH) {
// If _except_handler4 is in use, some additional guard checks and prologue
// stuff is required.
- bool UseStackGuard = (PersonalityName == "_except_handler4");
+ StringRef PersonalityName = PersonalityFn->getName();
+ UseStackGuard = (PersonalityName == "_except_handler4");
+
+ // Allocate local structures.
RegNodeTy = getSEHRegistrationType();
RegNode = Builder.CreateAlloca(RegNodeTy);
+ if (UseStackGuard)
+ EHGuardNode = Builder.CreateAlloca(Int32Ty);
+
// SavedESP = llvm.stacksave()
Value *SP = Builder.CreateCall(
Intrinsic::getDeclaration(TheModule, Intrinsic::stacksave), {});
Builder.CreateStore(SP, Builder.CreateStructGEP(RegNodeTy, RegNode, 0));
// TryLevel = -2 / -1
StateFieldIndex = 4;
- insertStateNumberStore(RegNode, &*Builder.GetInsertPoint(),
- UseStackGuard ? -2 : -1);
+ ParentBaseState = UseStackGuard ? -2 : -1;
+ insertStateNumberStore(&*Builder.GetInsertPoint(), ParentBaseState);
// ScopeTable = llvm.x86.seh.lsda(F)
- Value *FI8 = Builder.CreateBitCast(F, Int8PtrType);
- Value *LSDA = Builder.CreateCall(
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_seh_lsda), FI8);
- Type *Int32Ty = Type::getInt32Ty(TheModule->getContext());
+ Value *LSDA = emitEHLSDA(Builder, F);
LSDA = Builder.CreatePtrToInt(LSDA, Int32Ty);
// If using _except_handler4, xor the address of the table with
// __security_cookie.
if (UseStackGuard) {
- Value *Cookie =
- TheModule->getOrInsertGlobal("__security_cookie", Int32Ty);
- Value *Val = Builder.CreateLoad(Int32Ty, Cookie);
+ Cookie = TheModule->getOrInsertGlobal("__security_cookie", Int32Ty);
+ Value *Val = Builder.CreateLoad(Int32Ty, Cookie, "cookie");
LSDA = Builder.CreateXor(LSDA, Val);
}
Builder.CreateStore(LSDA, Builder.CreateStructGEP(RegNodeTy, RegNode, 3));
+
+ // If using _except_handler4, the EHGuard contains: FramePtr xor Cookie.
+ if (UseStackGuard) {
+ Value *Val = Builder.CreateLoad(Int32Ty, Cookie);
+ Value *FrameAddr = Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::frameaddress),
+ Builder.getInt32(0), "frameaddr");
+ Value *FrameAddrI32 = Builder.CreatePtrToInt(FrameAddr, Int32Ty);
+ FrameAddrI32 = Builder.CreateXor(FrameAddrI32, Val);
+ Builder.CreateStore(FrameAddrI32, EHGuardNode);
+ }
+
+ // Register the exception handler.
Link = Builder.CreateStructGEP(RegNodeTy, RegNode, 2);
linkExceptionRegistration(Builder, PersonalityFn);
+
+ SehLongjmpUnwind = TheModule->getOrInsertFunction(
+ UseStackGuard ? "_seh_longjmp_unwind4" : "_seh_longjmp_unwind",
+ FunctionType::get(Type::getVoidTy(TheModule->getContext()), Int8PtrType,
+ /*isVarArg=*/false));
+ cast<Function>(SehLongjmpUnwind->stripPointerCasts())
+ ->setCallingConv(CallingConv::X86_StdCall);
} else {
llvm_unreachable("unexpected personality function");
}
@@ -398,15 +451,203 @@ void WinEHStatePass::unlinkExceptionRegistration(IRBuilder<> &Builder) {
Builder.CreateStore(Next, FSZero);
}
+// Calls to setjmp(p) are lowered to _setjmp3(p, 0) by the frontend.
+// The idea behind _setjmp3 is that it takes an optional number of personality
+// specific parameters to indicate how to restore the personality-specific frame
+// state when longjmp is initiated. Typically, the current TryLevel is saved.
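+//
+// For example, under the MSVC C++ personality a frontend-emitted call
+//   %r = call i32 @_setjmp3(i8* %buf, i32 0)
+// would be rewritten along the lines of
+//   %r = call i32 @_setjmp3(i8* %buf, i32 3,
+//                           void (i8*)* @__CxxLongjmpUnwind,
+//                           i32 <current state>, i8* <LSDA>)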
+void WinEHStatePass::rewriteSetJmpCallSite(IRBuilder<> &Builder, Function &F,
+ CallSite CS, Value *State) {
+ // Don't rewrite calls with a weird number of arguments.
+ if (CS.getNumArgOperands() != 2)
+ return;
+
+ Instruction *Inst = CS.getInstruction();
+
+ SmallVector<OperandBundleDef, 1> OpBundles;
+ CS.getOperandBundlesAsDefs(OpBundles);
+
+ SmallVector<Value *, 3> OptionalArgs;
+ if (Personality == EHPersonality::MSVC_CXX) {
+ OptionalArgs.push_back(CxxLongjmpUnwind);
+ OptionalArgs.push_back(State);
+ OptionalArgs.push_back(emitEHLSDA(Builder, &F));
+ } else if (Personality == EHPersonality::MSVC_X86SEH) {
+ OptionalArgs.push_back(SehLongjmpUnwind);
+ OptionalArgs.push_back(State);
+ if (UseStackGuard)
+ OptionalArgs.push_back(Cookie);
+ } else {
+ llvm_unreachable("unhandled personality!");
+ }
+
+ SmallVector<Value *, 5> Args;
+ Args.push_back(
+ Builder.CreateBitCast(CS.getArgOperand(0), Builder.getInt8PtrTy()));
+ Args.push_back(Builder.getInt32(OptionalArgs.size()));
+ Args.append(OptionalArgs.begin(), OptionalArgs.end());
+
+ CallSite NewCS;
+ if (CS.isCall()) {
+ auto *CI = cast<CallInst>(Inst);
+ CallInst *NewCI = Builder.CreateCall(SetJmp3, Args, OpBundles);
+ NewCI->setTailCallKind(CI->getTailCallKind());
+ NewCS = NewCI;
+ } else {
+ auto *II = cast<InvokeInst>(Inst);
+ NewCS = Builder.CreateInvoke(
+ SetJmp3, II->getNormalDest(), II->getUnwindDest(), Args, OpBundles);
+ }
+ NewCS.setCallingConv(CS.getCallingConv());
+ NewCS.setAttributes(CS.getAttributes());
+ NewCS->setDebugLoc(CS->getDebugLoc());
+
+ Instruction *NewInst = NewCS.getInstruction();
+ NewInst->takeName(Inst);
+ Inst->replaceAllUsesWith(NewInst);
+ Inst->eraseFromParent();
+}
+
+// Figure out what state we should assign calls in this block.
+int WinEHStatePass::getBaseStateForBB(
+ DenseMap<BasicBlock *, ColorVector> &BlockColors, WinEHFuncInfo &FuncInfo,
+ BasicBlock *BB) {
+ int BaseState = ParentBaseState;
+ auto &BBColors = BlockColors[BB];
+
+ assert(BBColors.size() == 1 && "multi-color BB not removed by preparation");
+ BasicBlock *FuncletEntryBB = BBColors.front();
+ if (auto *FuncletPad =
+ dyn_cast<FuncletPadInst>(FuncletEntryBB->getFirstNonPHI())) {
+ auto BaseStateI = FuncInfo.FuncletBaseStateMap.find(FuncletPad);
+ if (BaseStateI != FuncInfo.FuncletBaseStateMap.end())
+ BaseState = BaseStateI->second;
+ }
+
+ return BaseState;
+}
+
+// Calculate the state a call-site is in.
+int WinEHStatePass::getStateForCallSite(
+ DenseMap<BasicBlock *, ColorVector> &BlockColors, WinEHFuncInfo &FuncInfo,
+ CallSite CS) {
+ if (auto *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
+ // Look up the state number of the EH pad this unwinds to.
+ assert(FuncInfo.InvokeStateMap.count(II) && "invoke has no state!");
+ return FuncInfo.InvokeStateMap[II];
+ }
+  // Possibly throwing call instructions have no actions to take after
+  // an unwind; ensure they are in their block's base state.
+ return getBaseStateForBB(BlockColors, FuncInfo, CS.getParent());
+}
+
+// Calculate the intersection of all the FinalStates for a BasicBlock's
+// predecessors.
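+//
+// For example, predecessors all ending in state 1 meet to 1; predecessors
+// ending in states 1 and 2, or any predecessor with an unknown final state,
+// meet to OverdefinedState.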
+static int getPredState(DenseMap<BasicBlock *, int> &FinalStates, Function &F,
+ int ParentBaseState, BasicBlock *BB) {
+ // The entry block has no predecessors but we know that the prologue always
+ // sets us up with a fixed state.
+ if (&F.getEntryBlock() == BB)
+ return ParentBaseState;
+
+  // This is an EH pad; conservatively report this basic block as overdefined.
+ if (BB->isEHPad())
+ return OverdefinedState;
+
+ int CommonState = OverdefinedState;
+ for (BasicBlock *PredBB : predecessors(BB)) {
+    // We didn't manage to get a state for one of these predecessors;
+    // conservatively report this basic block as overdefined.
+ auto PredEndState = FinalStates.find(PredBB);
+ if (PredEndState == FinalStates.end())
+ return OverdefinedState;
+
+    // This code is reachable via exceptional control flow;
+    // conservatively report this basic block as overdefined.
+ if (isa<CatchReturnInst>(PredBB->getTerminator()))
+ return OverdefinedState;
+
+ int PredState = PredEndState->second;
+ assert(PredState != OverdefinedState &&
+ "overdefined BBs shouldn't be in FinalStates");
+ if (CommonState == OverdefinedState)
+ CommonState = PredState;
+
+    // At least two predecessors have different FinalStates;
+    // conservatively report this basic block as overdefined.
+ if (CommonState != PredState)
+ return OverdefinedState;
+ }
+
+ return CommonState;
+}
+
+// Calculate the intersection of all the InitialStates for a BasicBlock's
+// successors.
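+//
+// This is the dual of getPredState; when all successors begin in the same
+// state, the store establishing that state can be hoisted into this block.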
+static int getSuccState(DenseMap<BasicBlock *, int> &InitialStates, Function &F,
+ int ParentBaseState, BasicBlock *BB) {
+// This block rejoins normal control flow;
+// conservatively report this basic block as overdefined.
+ if (isa<CatchReturnInst>(BB->getTerminator()))
+ return OverdefinedState;
+
+ int CommonState = OverdefinedState;
+ for (BasicBlock *SuccBB : successors(BB)) {
+    // We didn't manage to get a state for one of these successors;
+    // conservatively report this basic block as overdefined.
+ auto SuccStartState = InitialStates.find(SuccBB);
+ if (SuccStartState == InitialStates.end())
+ return OverdefinedState;
+
+    // This is an EH pad; conservatively report this basic block as overdefined.
+ if (SuccBB->isEHPad())
+ return OverdefinedState;
+
+ int SuccState = SuccStartState->second;
+ assert(SuccState != OverdefinedState &&
+ "overdefined BBs shouldn't be in FinalStates");
+ if (CommonState == OverdefinedState)
+ CommonState = SuccState;
+
+    // At least two successors have different InitialStates;
+    // conservatively report this basic block as overdefined.
+ if (CommonState != SuccState)
+ return OverdefinedState;
+ }
+
+ return CommonState;
+}
+
+bool WinEHStatePass::isStateStoreNeeded(EHPersonality Personality,
+ CallSite CS) {
+ if (!CS)
+ return false;
+
+ // If the function touches memory, it needs a state store.
+ if (isAsynchronousEHPersonality(Personality))
+ return !CS.doesNotAccessMemory();
+
+ // If the function throws, it needs a state store.
+ return !CS.doesNotThrow();
+}
+
void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
// Mark the registration node. The backend needs to know which alloca it is so
// that it can recover the original frame pointer.
- IRBuilder<> Builder(RegNode->getParent(), std::next(RegNode->getIterator()));
+ IRBuilder<> Builder(RegNode->getNextNode());
Value *RegNodeI8 = Builder.CreateBitCast(RegNode, Builder.getInt8PtrTy());
Builder.CreateCall(
Intrinsic::getDeclaration(TheModule, Intrinsic::x86_seh_ehregnode),
{RegNodeI8});
+ if (EHGuardNode) {
+ IRBuilder<> Builder(EHGuardNode->getNextNode());
+ Value *EHGuardNodeI8 =
+ Builder.CreateBitCast(EHGuardNode, Builder.getInt8PtrTy());
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_seh_ehguard),
+ {EHGuardNodeI8});
+ }
+
// Calculate state numbers.
if (isAsynchronousEHPersonality(Personality))
calculateSEHStateNumbers(&F, FuncInfo);
@@ -415,42 +656,141 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
// Iterate all the instructions and emit state number stores.
DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(F);
- for (BasicBlock &BB : F) {
- // Figure out what state we should assign calls in this block.
- int BaseState = -1;
- auto &BBColors = BlockColors[&BB];
+ ReversePostOrderTraversal<Function *> RPOT(&F);
+
+ // InitialStates yields the state of the first call-site for a BasicBlock.
+ DenseMap<BasicBlock *, int> InitialStates;
+ // FinalStates yields the state of the last call-site for a BasicBlock.
+ DenseMap<BasicBlock *, int> FinalStates;
+ // Worklist used to revisit BasicBlocks with indeterminate
+ // Initial/Final-States.
+ std::deque<BasicBlock *> Worklist;
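+
+  // The analysis proceeds in phases: seed Initial/Final states from blocks
+  // containing interesting call-sites, propagate through call-site-free
+  // blocks via the worklist, then meet successor states to hoist stores.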
+ // Fill in InitialStates and FinalStates for BasicBlocks with call-sites.
+ for (BasicBlock *BB : RPOT) {
+ int InitialState = OverdefinedState;
+ int FinalState;
+ if (&F.getEntryBlock() == BB)
+ InitialState = FinalState = ParentBaseState;
+ for (Instruction &I : *BB) {
+ CallSite CS(&I);
+ if (!isStateStoreNeeded(Personality, CS))
+ continue;
+
+ int State = getStateForCallSite(BlockColors, FuncInfo, CS);
+ if (InitialState == OverdefinedState)
+ InitialState = State;
+ FinalState = State;
+ }
+ // No call-sites in this basic block? That's OK, we will come back to these
+ // in a later pass.
+ if (InitialState == OverdefinedState) {
+ Worklist.push_back(BB);
+ continue;
+ }
+ DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " InitialState=" << InitialState << '\n');
+ DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " FinalState=" << FinalState << '\n');
+ InitialStates.insert({BB, InitialState});
+ FinalStates.insert({BB, FinalState});
+ }
- assert(BBColors.size() == 1 &&
- "multi-color BB not removed by preparation");
+  // Try to fill in InitialStates and FinalStates for blocks without call-sites.
+ while (!Worklist.empty()) {
+ BasicBlock *BB = Worklist.front();
+ Worklist.pop_front();
+    // This BasicBlock has already been figured out; nothing more we can do.
+ if (InitialStates.count(BB) != 0)
+ continue;
+
+ int PredState = getPredState(FinalStates, F, ParentBaseState, BB);
+ if (PredState == OverdefinedState)
+ continue;
+
+    // We successfully inferred this BasicBlock's state via its predecessors;
+    // enqueue its successors to see if we can infer their states.
+ InitialStates.insert({BB, PredState});
+ FinalStates.insert({BB, PredState});
+ for (BasicBlock *SuccBB : successors(BB))
+ Worklist.push_back(SuccBB);
+ }
+
+ // Try to hoist stores from successors.
+ for (BasicBlock *BB : RPOT) {
+ int SuccState = getSuccState(InitialStates, F, ParentBaseState, BB);
+ if (SuccState == OverdefinedState)
+ continue;
+
+ // Update our FinalState to reflect the common InitialState of our
+ // successors.
+ FinalStates.insert({BB, SuccState});
+ }
+
+ // Finally, insert state stores before call-sites which transition us to a new
+ // state.
+ for (BasicBlock *BB : RPOT) {
+ auto &BBColors = BlockColors[BB];
BasicBlock *FuncletEntryBB = BBColors.front();
- if (auto *FuncletPad =
- dyn_cast<FuncletPadInst>(FuncletEntryBB->getFirstNonPHI())) {
- auto BaseStateI = FuncInfo.FuncletBaseStateMap.find(FuncletPad);
- if (BaseStateI != FuncInfo.FuncletBaseStateMap.end())
- BaseState = BaseStateI->second;
+ if (isa<CleanupPadInst>(FuncletEntryBB->getFirstNonPHI()))
+ continue;
+
+ int PrevState = getPredState(FinalStates, F, ParentBaseState, BB);
+ DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " PrevState=" << PrevState << '\n');
+
+ for (Instruction &I : *BB) {
+ CallSite CS(&I);
+ if (!isStateStoreNeeded(Personality, CS))
+ continue;
+
+ int State = getStateForCallSite(BlockColors, FuncInfo, CS);
+ if (State != PrevState)
+ insertStateNumberStore(&I, State);
+ PrevState = State;
}
- for (Instruction &I : BB) {
- if (auto *CI = dyn_cast<CallInst>(&I)) {
- // Possibly throwing call instructions have no actions to take after
- // an unwind. Ensure they are in the -1 state.
- if (CI->doesNotThrow())
- continue;
- insertStateNumberStore(RegNode, CI, BaseState);
- } else if (auto *II = dyn_cast<InvokeInst>(&I)) {
- // Look up the state number of the landingpad this unwinds to.
- assert(FuncInfo.InvokeStateMap.count(II) && "invoke has no state!");
- int State = FuncInfo.InvokeStateMap[II];
- insertStateNumberStore(RegNode, II, State);
- }
+    // We might have hoisted a state store into this block; emit it now.
+ auto EndState = FinalStates.find(BB);
+ if (EndState != FinalStates.end())
+ if (EndState->second != PrevState)
+ insertStateNumberStore(BB->getTerminator(), EndState->second);
+ }
+
+ SmallVector<CallSite, 1> SetJmp3CallSites;
+ for (BasicBlock *BB : RPOT) {
+ for (Instruction &I : *BB) {
+ CallSite CS(&I);
+ if (!CS)
+ continue;
+ if (CS.getCalledValue()->stripPointerCasts() !=
+ SetJmp3->stripPointerCasts())
+ continue;
+
+ SetJmp3CallSites.push_back(CS);
+ }
+ }
+
+ for (CallSite CS : SetJmp3CallSites) {
+ auto &BBColors = BlockColors[CS->getParent()];
+ BasicBlock *FuncletEntryBB = BBColors.front();
+ bool InCleanup = isa<CleanupPadInst>(FuncletEntryBB->getFirstNonPHI());
+
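+    // A cleanup funclet can be entered with the frame in any state, so load
+    // the current state from the registration node instead of using a
+    // statically computed value.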
+ IRBuilder<> Builder(CS.getInstruction());
+ Value *State;
+ if (InCleanup) {
+ Value *StateField =
+ Builder.CreateStructGEP(nullptr, RegNode, StateFieldIndex);
+ State = Builder.CreateLoad(StateField);
+ } else {
+ State = Builder.getInt32(getStateForCallSite(BlockColors, FuncInfo, CS));
}
+ rewriteSetJmpCallSite(Builder, F, CS, State);
}
}
-void WinEHStatePass::insertStateNumberStore(Value *ParentRegNode,
- Instruction *IP, int State) {
+void WinEHStatePass::insertStateNumberStore(Instruction *IP, int State) {
IRBuilder<> Builder(IP);
Value *StateField =
- Builder.CreateStructGEP(RegNodeTy, ParentRegNode, StateFieldIndex);
+ Builder.CreateStructGEP(nullptr, RegNode, StateFieldIndex);
Builder.CreateStore(Builder.getInt32(State), StateField);
}