Diffstat (limited to 'lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp')
-rw-r--r--  lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp  40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index ebf656c549ec..2e3a453f9c75 100644
--- a/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -815,6 +815,10 @@ public:
 class AMDGPUAsmParser : public MCTargetAsmParser {
   MCAsmParser &Parser;
 
+  // Number of extra operands parsed after the first optional operand.
+  // This may be necessary to skip hardcoded mandatory operands.
+  static const unsigned MAX_OPR_LOOKAHEAD = 1;
+
   unsigned ForcedEncodingSize = 0;
   bool ForcedDPP = false;
   bool ForcedSDWA = false;
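For intuition about how this new constant is used further down in the patch, here is a minimal standalone sketch of the lookahead wrapper. It uses a toy token stream and made-up names (ToyParser, the deque-based token handling, and the simplified parse helpers are all illustrative, not the real MCTargetAsmParser API); only MAX_OPR_LOOKAHEAD mirrors the constant added above.

// Hedged sketch only: after the first optional operand, consume up to
// MAX_OPR_LOOKAHEAD further operands so that a hardcoded mandatory
// operand such as 'glc' is not left for the autogenerated matcher to
// trip over. Every name except MAX_OPR_LOOKAHEAD is hypothetical.
#include <deque>
#include <string>

enum class MatchResult { Success, NoMatch, ParseFail };

static const unsigned MAX_OPR_LOOKAHEAD = 1;

struct ToyParser {
  std::deque<std::string> Toks;     // remaining tokens on the current line
  std::deque<std::string> Operands; // operands accepted so far

  // Stand-in for the table-driven optional-operand parser.
  MatchResult parseOptionalOpr() {
    if (Toks.empty())
      return MatchResult::NoMatch;
    Operands.push_back(Toks.front());
    Toks.pop_front();
    return MatchResult::Success;
  }

  // Wrapper: parse the first optional operand, then up to
  // MAX_OPR_LOOKAHEAD extra operands to step over hardcoded
  // mandatory operands that may follow the optional ones.
  MatchResult parseOptionalOperand() {
    MatchResult Res = parseOptionalOpr();
    for (unsigned I = 0; I < MAX_OPR_LOOKAHEAD; ++I) {
      if (Res != MatchResult::Success || Toks.empty())
        break;
      Res = parseOptionalOpr();
    }
    return Res;
  }
};

int main() {
  // Toy token stream: an optional modifier followed by a hardcoded 'glc'.
  ToyParser P{{"offset:4095", "glc"}, {}};
  P.parseOptionalOperand(); // consumes both tokens thanks to the lookahead
  return P.Operands.size() == 2 ? 0 : 1;
}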
@@ -1037,6 +1041,7 @@ private:
 public:
   OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
+  OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);
   OperandMatchResultTy parseExpTgt(OperandVector &Operands);
   OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
@@ -3859,7 +3864,7 @@ AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
   } else {
     // Swizzle "offset" operand is optional.
     // If it is omitted, try parsing other optional operands.
-    return parseOptionalOperand(Operands);
+    return parseOptionalOpr(Operands);
   }
 }
@@ -4179,6 +4184,39 @@ static const OptionalOperand AMDGPUOptionalOperandTable[] = {
 };
 
 OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
+  unsigned size = Operands.size();
+  assert(size > 0);
+
+  OperandMatchResultTy res = parseOptionalOpr(Operands);
+
+  // This is a hack to enable hardcoded mandatory operands which follow
+  // optional operands.
+  //
+  // Current design assumes that all operands after the first optional operand
+  // are also optional. However, the implementation of some instructions violates
+  // this rule (see e.g. flat/global atomics, which have hardcoded 'glc' operands).
+  //
+  // To alleviate this problem, we have to (implicitly) parse extra operands
+  // to make sure the autogenerated parser of custom operands never hits hardcoded
+  // mandatory operands.
+
+  if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {
+
+    // We have parsed the first optional operand.
+    // Parse as many operands as necessary to skip all mandatory operands.
+
+    for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
+      if (res != MatchOperand_Success ||
+          getLexer().is(AsmToken::EndOfStatement)) break;
+      if (getLexer().is(AsmToken::Comma)) Parser.Lex();
+      res = parseOptionalOpr(Operands);
+    }
+  }
+
+  return res;
+}
+
+OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
   OperandMatchResultTy res;
   for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
     // try to parse any optional operand here
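The tail of this hunk shows the renamed parseOptionalOpr iterating over AMDGPUOptionalOperandTable. As a rough mental model only (the entry layout, helper names, and std::function-based parse callbacks below are hypothetical; the real table entries also carry an immediate kind and a conversion callback), a table-driven optional-operand loop of this shape tries each known optional operand in turn and returns the first result that is not a plain "no match":

// Hedged sketch of a table-driven optional-operand loop; names and the
// entry layout are illustrative, not the actual LLVM definitions.
#include <functional>
#include <string>
#include <vector>

enum class MatchResult { Success, NoMatch, ParseFail };

struct OptionalOprEntry {
  const char *Name; // e.g. "offset", "glc", "slc"
  std::function<MatchResult(std::vector<std::string> &Operands)> Parse;
};

MatchResult parseOptionalOpr(std::vector<std::string> &Operands,
                             const std::vector<OptionalOprEntry> &Table) {
  for (const OptionalOprEntry &Op : Table) {
    // A success or a hard parse error terminates the search;
    // NoMatch moves on to the next table entry.
    MatchResult Res = Op.Parse(Operands);
    if (Res != MatchResult::NoMatch)
      return Res;
  }
  return MatchResult::NoMatch;
}

int main() {
  std::vector<OptionalOprEntry> Table = {
      {"glc", [](std::vector<std::string> &Ops) {
         Ops.push_back("glc"); // pretend the token was matched
         return MatchResult::Success;
       }}};
  std::vector<std::string> Ops;
  return parseOptionalOpr(Ops, Table) == MatchResult::Success ? 0 : 1;
}

In this model, the new parseOptionalOperand is just this loop plus the bounded lookahead from the previous hunk; note that parseSwizzleOp is switched to call parseOptionalOpr directly, so it opts out of the extra lookahead.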