author     Dimitry Andric <dim@FreeBSD.org>  2019-12-20 19:53:05 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2019-12-20 19:53:05 +0000
commit     0b57cec536236d46e3dba9bd041533462f33dbb7 (patch)
tree       56229dbdbbf76d18580f72f789003db17246c8d9 /contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc
parent     718ef55ec7785aae63f98f8ca05dc07ed399c16d (diff)
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc')
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h      848
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp         763
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp    438
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp        216
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h           27
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h            69
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp       1587
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h          222
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp          135
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h             46
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp      649
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp             147
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h               168
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp       412
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h          89
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp   411
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp      69
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h       110
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp 126
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp    201
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.h       27
21 files changed, 6760 insertions, 0 deletions
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
new file mode 100644
index 000000000000..05a909f1780a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -0,0 +1,848 @@
+//===- AArch64AddressingModes.h - AArch64 Addressing Modes ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the AArch64 addressing mode implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
+#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/bit.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+
+namespace llvm {
+
+/// AArch64_AM - AArch64 addressing mode helpers.
+namespace AArch64_AM {
+
+//===----------------------------------------------------------------------===//
+// Shifts
+//
+
+enum ShiftExtendType {
+ InvalidShiftExtend = -1,
+ LSL = 0,
+ LSR,
+ ASR,
+ ROR,
+ MSL,
+
+ UXTB,
+ UXTH,
+ UXTW,
+ UXTX,
+
+ SXTB,
+ SXTH,
+ SXTW,
+ SXTX,
+};
+
+/// getShiftExtendName - Get the string encoding for the shift or extend type.
+static inline const char *getShiftExtendName(AArch64_AM::ShiftExtendType ST) {
+ switch (ST) {
+ default: llvm_unreachable("unhandled shift type!");
+ case AArch64_AM::LSL: return "lsl";
+ case AArch64_AM::LSR: return "lsr";
+ case AArch64_AM::ASR: return "asr";
+ case AArch64_AM::ROR: return "ror";
+ case AArch64_AM::MSL: return "msl";
+ case AArch64_AM::UXTB: return "uxtb";
+ case AArch64_AM::UXTH: return "uxth";
+ case AArch64_AM::UXTW: return "uxtw";
+ case AArch64_AM::UXTX: return "uxtx";
+ case AArch64_AM::SXTB: return "sxtb";
+ case AArch64_AM::SXTH: return "sxth";
+ case AArch64_AM::SXTW: return "sxtw";
+ case AArch64_AM::SXTX: return "sxtx";
+ }
+ return nullptr;
+}
+
+/// getShiftType - Extract the shift type.
+static inline AArch64_AM::ShiftExtendType getShiftType(unsigned Imm) {
+ switch ((Imm >> 6) & 0x7) {
+ default: return AArch64_AM::InvalidShiftExtend;
+ case 0: return AArch64_AM::LSL;
+ case 1: return AArch64_AM::LSR;
+ case 2: return AArch64_AM::ASR;
+ case 3: return AArch64_AM::ROR;
+ case 4: return AArch64_AM::MSL;
+ }
+}
+
+/// getShiftValue - Extract the shift value.
+static inline unsigned getShiftValue(unsigned Imm) {
+ return Imm & 0x3f;
+}
+
+/// getShifterImm - Encode the shift type and amount:
+/// imm: 6-bit shift amount
+/// shifter: 000 ==> lsl
+/// 001 ==> lsr
+/// 010 ==> asr
+/// 011 ==> ror
+/// 100 ==> msl
+/// {8-6} = shifter
+/// {5-0} = imm
+static inline unsigned getShifterImm(AArch64_AM::ShiftExtendType ST,
+ unsigned Imm) {
+  assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!");
+ unsigned STEnc = 0;
+ switch (ST) {
+ default: llvm_unreachable("Invalid shift requested");
+ case AArch64_AM::LSL: STEnc = 0; break;
+ case AArch64_AM::LSR: STEnc = 1; break;
+ case AArch64_AM::ASR: STEnc = 2; break;
+ case AArch64_AM::ROR: STEnc = 3; break;
+ case AArch64_AM::MSL: STEnc = 4; break;
+ }
+ return (STEnc << 6) | (Imm & 0x3f);
+}
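A quick sanity check of the layout above: the encode and decode helpers round-trip. A minimal standalone sketch (the function name is illustrative; it assumes this header is reachable on the include path):

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>

void shifterImmRoundTrip() {
  using namespace llvm::AArch64_AM;
  unsigned Imm = getShifterImm(ASR, 7); // STEnc = 2, so (2 << 6) | 7 == 135
  assert(Imm == 135);
  assert(getShiftType(Imm) == ASR);     // bits {8-6} decode back to ASR
  assert(getShiftValue(Imm) == 7);      // bits {5-0} hold the amount
}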
+
+//===----------------------------------------------------------------------===//
+// Extends
+//
+
+/// getArithShiftValue - Get the arithmetic shift value.
+static inline unsigned getArithShiftValue(unsigned Imm) {
+ return Imm & 0x7;
+}
+
+/// getExtendType - Extract the extend type for operands of arithmetic ops.
+static inline AArch64_AM::ShiftExtendType getExtendType(unsigned Imm) {
+ assert((Imm & 0x7) == Imm && "invalid immediate!");
+ switch (Imm) {
+ default: llvm_unreachable("Compiler bug!");
+ case 0: return AArch64_AM::UXTB;
+ case 1: return AArch64_AM::UXTH;
+ case 2: return AArch64_AM::UXTW;
+ case 3: return AArch64_AM::UXTX;
+ case 4: return AArch64_AM::SXTB;
+ case 5: return AArch64_AM::SXTH;
+ case 6: return AArch64_AM::SXTW;
+ case 7: return AArch64_AM::SXTX;
+ }
+}
+
+static inline AArch64_AM::ShiftExtendType getArithExtendType(unsigned Imm) {
+ return getExtendType((Imm >> 3) & 0x7);
+}
+
+/// Mapping from extend bits to required operation:
+/// shifter: 000 ==> uxtb
+/// 001 ==> uxth
+/// 010 ==> uxtw
+/// 011 ==> uxtx
+/// 100 ==> sxtb
+/// 101 ==> sxth
+/// 110 ==> sxtw
+/// 111 ==> sxtx
+inline unsigned getExtendEncoding(AArch64_AM::ShiftExtendType ET) {
+ switch (ET) {
+ default: llvm_unreachable("Invalid extend type requested");
+  case AArch64_AM::UXTB: return 0;
+  case AArch64_AM::UXTH: return 1;
+  case AArch64_AM::UXTW: return 2;
+  case AArch64_AM::UXTX: return 3;
+  case AArch64_AM::SXTB: return 4;
+  case AArch64_AM::SXTH: return 5;
+  case AArch64_AM::SXTW: return 6;
+  case AArch64_AM::SXTX: return 7;
+ }
+}
+
+/// getArithExtendImm - Encode the extend type and shift amount for an
+/// arithmetic instruction:
+/// imm: 3-bit extend amount
+/// {5-3} = shifter
+/// {2-0} = imm3
+static inline unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET,
+ unsigned Imm) {
+  assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!");
+ return (getExtendEncoding(ET) << 3) | (Imm & 0x7);
+}
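The same round-trip property holds for arithmetic extends (sketch, same assumptions as above):

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>

void arithExtendRoundTrip() {
  using namespace llvm::AArch64_AM;
  unsigned Imm = getArithExtendImm(UXTW, 2); // (2 << 3) | 2 == 18
  assert(Imm == 18);
  assert(getArithExtendType(Imm) == UXTW);
  assert(getArithShiftValue(Imm) == 2);
}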
+
+/// getMemDoShift - Extract the "do shift" flag value for load/store
+/// instructions.
+static inline bool getMemDoShift(unsigned Imm) {
+ return (Imm & 0x1) != 0;
+}
+
+/// getMemExtendType - Extract the extend type for the offset operand of
+/// loads/stores.
+static inline AArch64_AM::ShiftExtendType getMemExtendType(unsigned Imm) {
+ return getExtendType((Imm >> 1) & 0x7);
+}
+
+/// getMemExtendImm - Encode the extend type and amount for a load/store inst:
+/// doshift: should the offset be scaled by the access size
+/// shifter: 000 ==> uxtb
+/// 001 ==> uxth
+/// 010 ==> uxtw
+/// 011 ==> uxtx
+/// 100 ==> sxtb
+/// 101 ==> sxth
+/// 110 ==> sxtw
+/// 111 ==> sxtx
+/// {3-1} = shifter
+/// {0} = doshift
+static inline unsigned getMemExtendImm(AArch64_AM::ShiftExtendType ET,
+ bool DoShift) {
+ return (getExtendEncoding(ET) << 1) | unsigned(DoShift);
+}
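And for the load/store form, where the low bit carries the scaling flag (sketch, same assumptions):

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>

void memExtendRoundTrip() {
  using namespace llvm::AArch64_AM;
  unsigned Imm = getMemExtendImm(SXTW, /*DoShift=*/true); // (6 << 1) | 1 == 13
  assert(Imm == 13);
  assert(getMemExtendType(Imm) == SXTW);
  assert(getMemDoShift(Imm));
}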
+
+static inline uint64_t ror(uint64_t elt, unsigned size) {
+ return ((elt & 1) << (size-1)) | (elt >> 1);
+}
+
+/// processLogicalImmediate - Determine if an immediate value can be encoded
+/// as the immediate operand of a logical instruction for the given register
+/// size. If so, return true with "encoding" set to the encoded value in
+/// the form N:immr:imms.
+static inline bool processLogicalImmediate(uint64_t Imm, unsigned RegSize,
+ uint64_t &Encoding) {
+ if (Imm == 0ULL || Imm == ~0ULL ||
+ (RegSize != 64 &&
+ (Imm >> RegSize != 0 || Imm == (~0ULL >> (64 - RegSize)))))
+ return false;
+
+ // First, determine the element size.
+ unsigned Size = RegSize;
+
+ do {
+ Size /= 2;
+ uint64_t Mask = (1ULL << Size) - 1;
+
+ if ((Imm & Mask) != ((Imm >> Size) & Mask)) {
+ Size *= 2;
+ break;
+ }
+ } while (Size > 2);
+
+ // Second, determine the rotation to make the element be: 0^m 1^n.
+ uint32_t CTO, I;
+ uint64_t Mask = ((uint64_t)-1LL) >> (64 - Size);
+ Imm &= Mask;
+
+ if (isShiftedMask_64(Imm)) {
+ I = countTrailingZeros(Imm);
+ assert(I < 64 && "undefined behavior");
+ CTO = countTrailingOnes(Imm >> I);
+ } else {
+ Imm |= ~Mask;
+ if (!isShiftedMask_64(~Imm))
+ return false;
+
+ unsigned CLO = countLeadingOnes(Imm);
+ I = 64 - CLO;
+ CTO = CLO + countTrailingOnes(Imm) - (64 - Size);
+ }
+
+ // Encode in Immr the number of RORs it would take to get *from* 0^m 1^n
+ // to our target value, where I is the number of RORs to go the opposite
+ // direction.
+ assert(Size > I && "I should be smaller than element size");
+ unsigned Immr = (Size - I) & (Size - 1);
+
+ // If size has a 1 in the n'th bit, create a value that has zeroes in
+ // bits [0, n] and ones above that.
+ uint64_t NImms = ~(Size-1) << 1;
+
+ // Or the CTO value into the low bits, which must be below the Nth bit
+ // bit mentioned above.
+ NImms |= (CTO-1);
+
+ // Extract the seventh bit and toggle it to create the N field.
+ unsigned N = ((NImms >> 6) & 1) ^ 1;
+
+ Encoding = (N << 12) | (Immr << 6) | (NImms & 0x3f);
+ return true;
+}
+
+/// isLogicalImmediate - Return true if the immediate is valid for a logical
+/// immediate instruction of the given register size. Return false otherwise.
+static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize) {
+ uint64_t encoding;
+ return processLogicalImmediate(imm, regSize, encoding);
+}
+
+/// encodeLogicalImmediate - Return the encoded immediate value for a logical
+/// immediate instruction of the given register size.
+static inline uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize) {
+ uint64_t encoding = 0;
+ bool res = processLogicalImmediate(imm, regSize, encoding);
+ assert(res && "invalid logical immediate");
+ (void)res;
+ return encoding;
+}
+
+/// decodeLogicalImmediate - Decode a logical immediate value in the form
+/// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the
+/// integer value it represents with regSize bits.
+static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) {
+ // Extract the N, imms, and immr fields.
+ unsigned N = (val >> 12) & 1;
+ unsigned immr = (val >> 6) & 0x3f;
+ unsigned imms = val & 0x3f;
+
+ assert((regSize == 64 || N == 0) && "undefined logical immediate encoding");
+ int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
+ assert(len >= 0 && "undefined logical immediate encoding");
+ unsigned size = (1 << len);
+ unsigned R = immr & (size - 1);
+ unsigned S = imms & (size - 1);
+ assert(S != size - 1 && "undefined logical immediate encoding");
+ uint64_t pattern = (1ULL << (S + 1)) - 1;
+ for (unsigned i = 0; i < R; ++i)
+ pattern = ror(pattern, size);
+
+ // Replicate the pattern to fill the regSize.
+ while (size != regSize) {
+ pattern |= (pattern << size);
+ size *= 2;
+ }
+ return pattern;
+}
+
+/// isValidDecodeLogicalImmediate - Check to see if the logical immediate value
+/// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits)
+/// is a valid encoding for an integer value with regSize bits.
+static inline bool isValidDecodeLogicalImmediate(uint64_t val,
+ unsigned regSize) {
+ // Extract the N and imms fields needed for checking.
+ unsigned N = (val >> 12) & 1;
+ unsigned imms = val & 0x3f;
+
+ if (regSize == 32 && N != 0) // undefined logical immediate encoding
+ return false;
+ int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
+ if (len < 0) // undefined logical immediate encoding
+ return false;
+ unsigned size = (1 << len);
+ unsigned S = imms & (size - 1);
+ if (S == size - 1) // undefined logical immediate encoding
+ return false;
+
+ return true;
+}
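A round-trip sketch of the logical-immediate helpers (illustrative function name, same include assumption): 0x00ff00ff00ff00ff is a repeating 16-bit element 0x00ff, so it is encodable, while all-zeros and all-ones are explicitly rejected by processLogicalImmediate:

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>
#include <cstdint>

void logicalImmRoundTrip() {
  using namespace llvm::AArch64_AM;
  uint64_t Imm = 0x00ff00ff00ff00ffULL;
  assert(isLogicalImmediate(Imm, 64));
  uint64_t Enc = encodeLogicalImmediate(Imm, 64); // N:immr:imms
  assert(isValidDecodeLogicalImmediate(Enc, 64));
  assert(decodeLogicalImmediate(Enc, 64) == Imm);
  assert(!isLogicalImmediate(0, 64));     // all-zeros is not encodable
  assert(!isLogicalImmediate(~0ULL, 64)); // neither is all-ones
}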
+
+//===----------------------------------------------------------------------===//
+// Floating-point Immediates
+//
+static inline float getFPImmFloat(unsigned Imm) {
+ // We expect an 8-bit binary encoding of a floating-point number here.
+
+ uint8_t Sign = (Imm >> 7) & 0x1;
+ uint8_t Exp = (Imm >> 4) & 0x7;
+ uint8_t Mantissa = Imm & 0xf;
+
+ // 8-bit FP IEEE Float Encoding
+ // abcd efgh aBbbbbbc defgh000 00000000 00000000
+ //
+ // where B = NOT(b);
+
+ uint32_t I = 0;
+ I |= Sign << 31;
+ I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
+ I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
+ I |= (Exp & 0x3) << 23;
+ I |= Mantissa << 19;
+ return bit_cast<float>(I);
+}
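Two worked decodings of the bit diagram above (sketch): imm8 0x70 is sign 0, exp 0b111, mantissa 0, which expands to IEEE 0x3f800000, and imm8 0x00 expands to 0x40000000:

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>

void fpImm8Decode() {
  using namespace llvm::AArch64_AM;
  assert(getFPImmFloat(0x70) == 1.0f); // FMOV ..., #1.0
  assert(getFPImmFloat(0x00) == 2.0f); // FMOV ..., #2.0
}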
+
+/// getFP16Imm - Return an 8-bit floating-point version of the 16-bit
+/// floating-point value. If the value cannot be represented as an 8-bit
+/// floating-point value, then return -1.
+static inline int getFP16Imm(const APInt &Imm) {
+ uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
+ int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15; // -14 to 15
+ int32_t Mantissa = Imm.getZExtValue() & 0x3ff; // 10 bits
+
+ // We can handle 4 bits of mantissa.
+ // mantissa = (16+UInt(e:f:g:h))/16.
+ if (Mantissa & 0x3f)
+ return -1;
+ Mantissa >>= 6;
+
+ // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
+ if (Exp < -3 || Exp > 4)
+ return -1;
+ Exp = ((Exp+3) & 0x7) ^ 4;
+
+ return ((int)Sign << 7) | (Exp << 4) | Mantissa;
+}
+
+static inline int getFP16Imm(const APFloat &FPImm) {
+ return getFP16Imm(FPImm.bitcastToAPInt());
+}
+
+/// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
+/// floating-point value. If the value cannot be represented as an 8-bit
+/// floating-point value, then return -1.
+static inline int getFP32Imm(const APInt &Imm) {
+ uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
+ int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
+ int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits
+
+ // We can handle 4 bits of mantissa.
+ // mantissa = (16+UInt(e:f:g:h))/16.
+ if (Mantissa & 0x7ffff)
+ return -1;
+ Mantissa >>= 19;
+ if ((Mantissa & 0xf) != Mantissa)
+ return -1;
+
+ // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
+ if (Exp < -3 || Exp > 4)
+ return -1;
+ Exp = ((Exp+3) & 0x7) ^ 4;
+
+ return ((int)Sign << 7) | (Exp << 4) | Mantissa;
+}
+
+static inline int getFP32Imm(const APFloat &FPImm) {
+ return getFP32Imm(FPImm.bitcastToAPInt());
+}
+
+/// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
+/// floating-point value. If the value cannot be represented as an 8-bit
+/// floating-point value, then return -1.
+static inline int getFP64Imm(const APInt &Imm) {
+ uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
+ int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
+ uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;
+
+ // We can handle 4 bits of mantissa.
+ // mantissa = (16+UInt(e:f:g:h))/16.
+ if (Mantissa & 0xffffffffffffULL)
+ return -1;
+ Mantissa >>= 48;
+ if ((Mantissa & 0xf) != Mantissa)
+ return -1;
+
+ // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
+ if (Exp < -3 || Exp > 4)
+ return -1;
+ Exp = ((Exp+3) & 0x7) ^ 4;
+
+ return ((int)Sign << 7) | (Exp << 4) | Mantissa;
+}
+
+static inline int getFP64Imm(const APFloat &FPImm) {
+ return getFP64Imm(FPImm.bitcastToAPInt());
+}
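The inverse direction via the APFloat overloads (sketch, illustrative name): 1.0 maps to imm8 0x70 at both widths, and a value needing more than four mantissa bits, such as 0.1f, is rejected with -1:

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APFloat.h"
#include <cassert>

void fpImm8Encode() {
  using namespace llvm;
  using namespace llvm::AArch64_AM;
  assert(getFP32Imm(APFloat(1.0f)) == 0x70);
  assert(getFP64Imm(APFloat(1.0)) == 0x70);
  assert(getFP32Imm(APFloat(0.1f)) == -1); // mantissa does not fit in 4 bits
}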
+
+//===--------------------------------------------------------------------===//
+// AdvSIMD Modified Immediates
+//===--------------------------------------------------------------------===//
+
+// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh
+static inline bool isAdvSIMDModImmType1(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xffffff00ffffff00ULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType1(uint64_t Imm) {
+ return (Imm & 0xffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType1(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 32) | EncVal;
+}
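Each AdvSIMD modified-immediate type below follows the same is/encode/decode triple; for type 1 the byte sits in byte 0 of each 32-bit lane (sketch, same assumptions):

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>
#include <cstdint>

void modImmType1RoundTrip() {
  using namespace llvm::AArch64_AM;
  uint64_t V = 0x000000ab000000abULL; // abcdefgh in byte 0 of each word
  assert(isAdvSIMDModImmType1(V));
  assert(encodeAdvSIMDModImmType1(V) == 0xab);
  assert(decodeAdvSIMDModImmType1(0xab) == V);
}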
+
+// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00
+static inline bool isAdvSIMDModImmType2(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xffff00ffffff00ffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType2(uint64_t Imm) {
+ return (Imm & 0xff00ULL) >> 8;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType2(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 40) | (EncVal << 8);
+}
+
+// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00
+static inline bool isAdvSIMDModImmType3(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xff00ffffff00ffffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType3(uint64_t Imm) {
+ return (Imm & 0xff0000ULL) >> 16;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType3(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 48) | (EncVal << 16);
+}
+
+// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00
+static inline bool isAdvSIMDModImmType4(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0x00ffffff00ffffffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType4(uint64_t Imm) {
+ return (Imm & 0xff000000ULL) >> 24;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType4(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 56) | (EncVal << 24);
+}
+
+// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh
+static inline bool isAdvSIMDModImmType5(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ (((Imm & 0x00ff0000ULL) >> 16) == (Imm & 0x000000ffULL)) &&
+ ((Imm & 0xff00ff00ff00ff00ULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType5(uint64_t Imm) {
+ return (Imm & 0xffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType5(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 48) | (EncVal << 32) | (EncVal << 16) | EncVal;
+}
+
+// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00
+static inline bool isAdvSIMDModImmType6(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ (((Imm & 0xff000000ULL) >> 16) == (Imm & 0x0000ff00ULL)) &&
+ ((Imm & 0x00ff00ff00ff00ffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType6(uint64_t Imm) {
+ return (Imm & 0xff00ULL) >> 8;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType6(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 56) | (EncVal << 40) | (EncVal << 24) | (EncVal << 8);
+}
+
+// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF
+static inline bool isAdvSIMDModImmType7(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xffff00ffffff00ffULL) == 0x000000ff000000ffULL);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType7(uint64_t Imm) {
+ return (Imm & 0xff00ULL) >> 8;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType7(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 40) | (EncVal << 8) | 0x000000ff000000ffULL;
+}
+
+// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF
+static inline bool isAdvSIMDModImmType8(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xff00ffffff00ffffULL) == 0x0000ffff0000ffffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType8(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 48) | (EncVal << 16) | 0x0000ffff0000ffffULL;
+}
+
+static inline uint8_t encodeAdvSIMDModImmType8(uint64_t Imm) {
+ return (Imm & 0x00ff0000ULL) >> 16;
+}
+
+// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
+static inline bool isAdvSIMDModImmType9(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm >> 48) == (Imm & 0x0000ffffULL)) &&
+ ((Imm >> 56) == (Imm & 0x000000ffULL));
+}
+
+static inline uint8_t encodeAdvSIMDModImmType9(uint64_t Imm) {
+ return (Imm & 0xffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ EncVal |= (EncVal << 8);
+ EncVal |= (EncVal << 16);
+ EncVal |= (EncVal << 32);
+ return EncVal;
+}
+
+// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
+// cmode: 1110, op: 1
+static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
+ uint64_t ByteA = Imm & 0xff00000000000000ULL;
+ uint64_t ByteB = Imm & 0x00ff000000000000ULL;
+ uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
+ uint64_t ByteD = Imm & 0x000000ff00000000ULL;
+ uint64_t ByteE = Imm & 0x00000000ff000000ULL;
+ uint64_t ByteF = Imm & 0x0000000000ff0000ULL;
+ uint64_t ByteG = Imm & 0x000000000000ff00ULL;
+ uint64_t ByteH = Imm & 0x00000000000000ffULL;
+
+ return (ByteA == 0ULL || ByteA == 0xff00000000000000ULL) &&
+ (ByteB == 0ULL || ByteB == 0x00ff000000000000ULL) &&
+ (ByteC == 0ULL || ByteC == 0x0000ff0000000000ULL) &&
+ (ByteD == 0ULL || ByteD == 0x000000ff00000000ULL) &&
+ (ByteE == 0ULL || ByteE == 0x00000000ff000000ULL) &&
+ (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
+ (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
+ (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {
+ uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0;
+ uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0;
+ uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0;
+ uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0;
+ uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0;
+ uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0;
+ uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0;
+ uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0;
+
+ uint8_t EncVal = BitA;
+ EncVal <<= 1;
+ EncVal |= BitB;
+ EncVal <<= 1;
+ EncVal |= BitC;
+ EncVal <<= 1;
+ EncVal |= BitD;
+ EncVal <<= 1;
+ EncVal |= BitE;
+ EncVal <<= 1;
+ EncVal |= BitF;
+ EncVal <<= 1;
+ EncVal |= BitG;
+ EncVal <<= 1;
+ EncVal |= BitH;
+ return EncVal;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType10(uint8_t Imm) {
+ uint64_t EncVal = 0;
+ if (Imm & 0x80) EncVal |= 0xff00000000000000ULL;
+ if (Imm & 0x40) EncVal |= 0x00ff000000000000ULL;
+ if (Imm & 0x20) EncVal |= 0x0000ff0000000000ULL;
+ if (Imm & 0x10) EncVal |= 0x000000ff00000000ULL;
+ if (Imm & 0x08) EncVal |= 0x00000000ff000000ULL;
+ if (Imm & 0x04) EncVal |= 0x0000000000ff0000ULL;
+ if (Imm & 0x02) EncVal |= 0x000000000000ff00ULL;
+ if (Imm & 0x01) EncVal |= 0x00000000000000ffULL;
+ return EncVal;
+}
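Type 10 is the per-byte mask form: each encoded bit selects 0x00 or 0xff for one byte, most significant byte first. 0xff00ff0000ff00ff has bytes A, C, F and H set, giving 0b10100101 (sketch):

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>
#include <cstdint>

void modImmType10RoundTrip() {
  using namespace llvm::AArch64_AM;
  uint64_t V = 0xff00ff0000ff00ffULL;
  assert(isAdvSIMDModImmType10(V));
  assert(encodeAdvSIMDModImmType10(V) == 0xa5);
  assert(decodeAdvSIMDModImmType10(0xa5) == V);
}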
+
+// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00
+static inline bool isAdvSIMDModImmType11(uint64_t Imm) {
+ uint64_t BString = (Imm & 0x7E000000ULL) >> 25;
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ (BString == 0x1f || BString == 0x20) &&
+ ((Imm & 0x0007ffff0007ffffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType11(uint64_t Imm) {
+ uint8_t BitA = (Imm & 0x80000000ULL) != 0;
+ uint8_t BitB = (Imm & 0x20000000ULL) != 0;
+ uint8_t BitC = (Imm & 0x01000000ULL) != 0;
+ uint8_t BitD = (Imm & 0x00800000ULL) != 0;
+ uint8_t BitE = (Imm & 0x00400000ULL) != 0;
+ uint8_t BitF = (Imm & 0x00200000ULL) != 0;
+ uint8_t BitG = (Imm & 0x00100000ULL) != 0;
+ uint8_t BitH = (Imm & 0x00080000ULL) != 0;
+
+ uint8_t EncVal = BitA;
+ EncVal <<= 1;
+ EncVal |= BitB;
+ EncVal <<= 1;
+ EncVal |= BitC;
+ EncVal <<= 1;
+ EncVal |= BitD;
+ EncVal <<= 1;
+ EncVal |= BitE;
+ EncVal <<= 1;
+ EncVal |= BitF;
+ EncVal <<= 1;
+ EncVal |= BitG;
+ EncVal <<= 1;
+ EncVal |= BitH;
+ return EncVal;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType11(uint8_t Imm) {
+ uint64_t EncVal = 0;
+ if (Imm & 0x80) EncVal |= 0x80000000ULL;
+ if (Imm & 0x40) EncVal |= 0x3e000000ULL;
+ else EncVal |= 0x40000000ULL;
+ if (Imm & 0x20) EncVal |= 0x01000000ULL;
+ if (Imm & 0x10) EncVal |= 0x00800000ULL;
+ if (Imm & 0x08) EncVal |= 0x00400000ULL;
+ if (Imm & 0x04) EncVal |= 0x00200000ULL;
+ if (Imm & 0x02) EncVal |= 0x00100000ULL;
+ if (Imm & 0x01) EncVal |= 0x00080000ULL;
+ return (EncVal << 32) | EncVal;
+}
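Type 11 splats an 8-bit float pattern across two 32-bit lanes; note how bit b of the encoding expands into the aBbbbbbc exponent pattern. Encoding 0x70 yields two lanes of IEEE 1.0f (sketch):

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>
#include <cstdint>

void modImmType11RoundTrip() {
  using namespace llvm::AArch64_AM;
  uint64_t V = decodeAdvSIMDModImmType11(0x70);
  assert(V == 0x3f8000003f800000ULL); // {1.0f, 1.0f}
  assert(isAdvSIMDModImmType11(V));
  assert(encodeAdvSIMDModImmType11(V) == 0x70);
}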
+
+// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00
+static inline bool isAdvSIMDModImmType12(uint64_t Imm) {
+ uint64_t BString = (Imm & 0x7fc0000000000000ULL) >> 54;
+ return ((BString == 0xff || BString == 0x100) &&
+ ((Imm & 0x0000ffffffffffffULL) == 0));
+}
+
+static inline uint8_t encodeAdvSIMDModImmType12(uint64_t Imm) {
+ uint8_t BitA = (Imm & 0x8000000000000000ULL) != 0;
+ uint8_t BitB = (Imm & 0x0040000000000000ULL) != 0;
+ uint8_t BitC = (Imm & 0x0020000000000000ULL) != 0;
+ uint8_t BitD = (Imm & 0x0010000000000000ULL) != 0;
+ uint8_t BitE = (Imm & 0x0008000000000000ULL) != 0;
+ uint8_t BitF = (Imm & 0x0004000000000000ULL) != 0;
+ uint8_t BitG = (Imm & 0x0002000000000000ULL) != 0;
+ uint8_t BitH = (Imm & 0x0001000000000000ULL) != 0;
+
+ uint8_t EncVal = BitA;
+ EncVal <<= 1;
+ EncVal |= BitB;
+ EncVal <<= 1;
+ EncVal |= BitC;
+ EncVal <<= 1;
+ EncVal |= BitD;
+ EncVal <<= 1;
+ EncVal |= BitE;
+ EncVal <<= 1;
+ EncVal |= BitF;
+ EncVal <<= 1;
+ EncVal |= BitG;
+ EncVal <<= 1;
+ EncVal |= BitH;
+ return EncVal;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) {
+ uint64_t EncVal = 0;
+ if (Imm & 0x80) EncVal |= 0x8000000000000000ULL;
+ if (Imm & 0x40) EncVal |= 0x3fc0000000000000ULL;
+ else EncVal |= 0x4000000000000000ULL;
+ if (Imm & 0x20) EncVal |= 0x0020000000000000ULL;
+ if (Imm & 0x10) EncVal |= 0x0010000000000000ULL;
+ if (Imm & 0x08) EncVal |= 0x0008000000000000ULL;
+ if (Imm & 0x04) EncVal |= 0x0004000000000000ULL;
+ if (Imm & 0x02) EncVal |= 0x0002000000000000ULL;
+ if (Imm & 0x01) EncVal |= 0x0001000000000000ULL;
+ return (EncVal << 32) | EncVal;
+}
+
+/// Returns true if Imm is the concatenation of a repeating pattern of type T.
+template <typename T>
+static inline bool isSVEMaskOfIdenticalElements(int64_t Imm) {
+ auto Parts = bit_cast<std::array<T, sizeof(int64_t) / sizeof(T)>>(Imm);
+ return all_of(Parts, [&](T Elem) { return Elem == Parts[0]; });
+}
+
+/// Returns true if Imm is valid for CPY/DUP.
+template <typename T>
+static inline bool isSVECpyImm(int64_t Imm) {
+ bool IsImm8 = int8_t(Imm) == Imm;
+ bool IsImm16 = int16_t(Imm & ~0xff) == Imm;
+
+ if (std::is_same<int8_t, typename std::make_signed<T>::type>::value)
+ return IsImm8 || uint8_t(Imm) == Imm;
+
+ if (std::is_same<int16_t, typename std::make_signed<T>::type>::value)
+ return IsImm8 || IsImm16 || uint16_t(Imm & ~0xff) == Imm;
+
+ return IsImm8 || IsImm16;
+}
+
+/// Returns true if Imm is valid for ADD/SUB.
+template <typename T>
+static inline bool isSVEAddSubImm(int64_t Imm) {
+ bool IsInt8t =
+ std::is_same<int8_t, typename std::make_signed<T>::type>::value;
+ return uint8_t(Imm) == Imm || (!IsInt8t && uint16_t(Imm & ~0xff) == Imm);
+}
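A few spot checks of the SVE immediate predicates (sketch, illustrative name): 0x2300 is a byte value shifted into the high half, so it is a valid 16-bit ADD/SUB immediate but not an 8-bit one:

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>

void sveImmChecks() {
  using namespace llvm::AArch64_AM;
  assert(isSVEMaskOfIdenticalElements<int16_t>(0x1234123412341234LL));
  assert(!isSVEMaskOfIdenticalElements<int16_t>(0x1234123412341235LL));
  assert(isSVECpyImm<int8_t>(-128));       // fits in a signed byte
  assert(isSVEAddSubImm<int16_t>(0x2300)); // 0x23 << 8
  assert(!isSVEAddSubImm<int8_t>(0x2300));
}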
+
+/// Return true if Imm is valid for DUPM and has no single CPY/DUP equivalent.
+static inline bool isSVEMoveMaskPreferredLogicalImmediate(int64_t Imm) {
+ if (isSVECpyImm<int64_t>(Imm))
+ return false;
+
+ auto S = bit_cast<std::array<int32_t, 2>>(Imm);
+ auto H = bit_cast<std::array<int16_t, 4>>(Imm);
+ auto B = bit_cast<std::array<int8_t, 8>>(Imm);
+
+ if (isSVEMaskOfIdenticalElements<int32_t>(Imm) && isSVECpyImm<int32_t>(S[0]))
+ return false;
+ if (isSVEMaskOfIdenticalElements<int16_t>(Imm) && isSVECpyImm<int16_t>(H[0]))
+ return false;
+ if (isSVEMaskOfIdenticalElements<int8_t>(Imm) && isSVECpyImm<int8_t>(B[0]))
+ return false;
+ return isLogicalImmediate(Imm, 64);
+}
+
+inline static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth) {
+ for (int Shift = 0; Shift <= RegWidth - 16; Shift += 16)
+ if ((Value & ~(0xffffULL << Shift)) == 0)
+ return true;
+
+ return false;
+}
+
+inline static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth) {
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+
+ // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
+ if (Value == 0 && Shift != 0)
+ return false;
+
+ return (Value & ~(0xffffULL << Shift)) == 0;
+}
+
+inline static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth) {
+ // MOVZ takes precedence over MOVN.
+ if (isAnyMOVZMovAlias(Value, RegWidth))
+ return false;
+
+ Value = ~Value;
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+
+ return isMOVZMovAlias(Value, Shift, RegWidth);
+}
+
+inline static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth) {
+ if (isAnyMOVZMovAlias(Value, RegWidth))
+ return true;
+
+ // It's not a MOVZ, but it might be a MOVN.
+ Value = ~Value;
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+
+ return isAnyMOVZMovAlias(Value, RegWidth);
+}
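The precedence rules compose as follows (sketch): a value with a single non-zero 16-bit chunk is a MOVZ alias, and a value whose complement has that shape is a MOVN alias:

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>
#include <cstdint>

void movAliasChecks() {
  using namespace llvm::AArch64_AM;
  assert(isMOVZMovAlias(0x12340000ULL, 16, 64));  // MOVZ Xd, #0x1234, lsl #16
  assert(!isMOVNMovAlias(0x12340000ULL, 16, 64)); // MOVZ takes precedence
  assert(isMOVNMovAlias(~0x12340000ULL, 16, 64)); // complement is MOVZ-shaped
  assert(isAnyMOVWMovAlias(~0x12340000ULL, 64));
}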
+
+} // end namespace AArch64_AM
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
new file mode 100644
index 000000000000..6418211a4f55
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -0,0 +1,763 @@
+//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCExpr.h"
+#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "Utils/AArch64BaseInfo.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+namespace {
+
+class AArch64AsmBackend : public MCAsmBackend {
+ static const unsigned PCRelFlagVal =
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
+ Triple TheTriple;
+
+public:
+ AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
+ : MCAsmBackend(IsLittleEndian ? support::little : support::big),
+ TheTriple(TT) {}
+
+ unsigned getNumFixupKinds() const override {
+ return AArch64::NumTargetFixupKinds;
+ }
+
+ Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
+ const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
+ // This table *must* be in the order that the fixup_* kinds are defined
+ // in AArch64FixupKinds.h.
+ //
+ // Name Offset (bits) Size (bits) Flags
+ {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
+ {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
+ {"fixup_aarch64_add_imm12", 10, 12, 0},
+ {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
+ {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
+ {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
+ {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
+ {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
+ {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
+ {"fixup_aarch64_movw", 5, 16, 0},
+ {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
+ {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
+ {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
+ {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal},
+ {"fixup_aarch64_tlsdesc_call", 0, 0, 0}};
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+ }
+
+ void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target, MutableArrayRef<char> Data,
+ uint64_t Value, bool IsResolved,
+ const MCSubtargetInfo *STI) const override;
+
+ bool mayNeedRelaxation(const MCInst &Inst,
+ const MCSubtargetInfo &STI) const override;
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override;
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override;
+ bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
+
+ void HandleAssemblerFlag(MCAssemblerFlag Flag) {}
+
+ unsigned getPointerSize() const { return 8; }
+
+  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;
+
+ bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target) override;
+};
+
+} // end anonymous namespace
+
+/// The number of bytes the fixup may change.
+static unsigned getFixupKindNumBytes(unsigned Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+
+ case FK_NONE:
+ case AArch64::fixup_aarch64_tlsdesc_call:
+ return 0;
+
+ case FK_Data_1:
+ return 1;
+
+ case FK_Data_2:
+ case FK_SecRel_2:
+ return 2;
+
+ case AArch64::fixup_aarch64_movw:
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ return 3;
+
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ case FK_Data_4:
+ case FK_SecRel_4:
+ return 4;
+
+ case FK_Data_8:
+ return 8;
+ }
+}
+
+static unsigned AdrImmBits(unsigned Value) {
+ unsigned lo2 = Value & 0x3;
+ unsigned hi19 = (Value & 0x1ffffc) >> 2;
+ return (hi19 << 5) | (lo2 << 29);
+}
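ADR/ADRP split their 21-bit immediate into immlo (instruction bits 30:29) and immhi (bits 23:5); AdrImmBits produces exactly that placement. A worked value (sketch; AdrImmBits is file-static, so this would live in the same file):

#include <cassert>

void adrImmBitsExample() {
  // Value = 0b101: lo2 = 0b01 -> bits 30:29, hi19 = 0b1 -> bit 5.
  assert(AdrImmBits(0x5) == ((1u << 29) | (1u << 5)));
}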
+
+static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
+ uint64_t Value, MCContext &Ctx,
+ const Triple &TheTriple, bool IsResolved) {
+ unsigned Kind = Fixup.getKind();
+ int64_t SignedValue = static_cast<int64_t>(Value);
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ if (SignedValue > 2097151 || SignedValue < -2097152)
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ return AdrImmBits(Value & 0x1fffffULL);
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ assert(!IsResolved);
+ if (TheTriple.isOSBinFormatCOFF())
+ return AdrImmBits(Value & 0x1fffffULL);
+ return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ // Signed 21-bit immediate
+ if (SignedValue > 2097151 || SignedValue < -2097152)
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Value & 0x3)
+ Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
+ // Low two bits are not encoded.
+ return (Value >> 2) & 0x7ffff;
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
+ Value &= 0xfff;
+ // Unsigned 12-bit immediate
+ if (Value >= 0x1000)
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ return Value;
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
+ Value &= 0xfff;
+ // Unsigned 12-bit immediate which gets multiplied by 2
+ if (Value >= 0x2000)
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Value & 0x1)
+ Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
+ return Value >> 1;
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
+ Value &= 0xfff;
+ // Unsigned 12-bit immediate which gets multiplied by 4
+ if (Value >= 0x4000)
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Value & 0x3)
+ Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
+ return Value >> 2;
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
+ Value &= 0xfff;
+ // Unsigned 12-bit immediate which gets multiplied by 8
+ if (Value >= 0x8000)
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Value & 0x7)
+ Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
+ return Value >> 3;
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
+ Value &= 0xfff;
+ // Unsigned 12-bit immediate which gets multiplied by 16
+ if (Value >= 0x10000)
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Value & 0xf)
+ Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
+ return Value >> 4;
+ case AArch64::fixup_aarch64_movw: {
+ AArch64MCExpr::VariantKind RefKind =
+ static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+ if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
+ AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
+ // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
+ // ever be resolved in the assembler.
+ Ctx.reportError(Fixup.getLoc(),
+ "relocation for a thread-local variable points to an "
+ "absolute symbol");
+ return Value;
+ }
+
+ if (!IsResolved) {
+ // FIXME: Figure out when this can actually happen, and verify our
+ // behavior.
+ Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
+ "implemented");
+ return Value;
+ }
+
+ if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
+ switch (AArch64MCExpr::getAddressFrag(RefKind)) {
+ case AArch64MCExpr::VK_G0:
+ break;
+ case AArch64MCExpr::VK_G1:
+ SignedValue = SignedValue >> 16;
+ break;
+ case AArch64MCExpr::VK_G2:
+ SignedValue = SignedValue >> 32;
+ break;
+ case AArch64MCExpr::VK_G3:
+ SignedValue = SignedValue >> 48;
+ break;
+ default:
+ llvm_unreachable("Variant kind doesn't correspond to fixup");
+ }
+
+ } else {
+ switch (AArch64MCExpr::getAddressFrag(RefKind)) {
+ case AArch64MCExpr::VK_G0:
+ break;
+ case AArch64MCExpr::VK_G1:
+ Value = Value >> 16;
+ break;
+ case AArch64MCExpr::VK_G2:
+ Value = Value >> 32;
+ break;
+ case AArch64MCExpr::VK_G3:
+ Value = Value >> 48;
+ break;
+ default:
+ llvm_unreachable("Variant kind doesn't correspond to fixup");
+ }
+ }
+
+    if (RefKind & AArch64MCExpr::VK_NC) {
+      Value &= 0xFFFF;
+    } else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
+      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
+        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+
+      // Invert the negative immediate because it will feed into a MOVN.
+      if (SignedValue < 0)
+        SignedValue = ~SignedValue;
+      Value = static_cast<uint64_t>(SignedValue);
+    } else if (Value > 0xFFFF) {
+      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+    }
+ return Value;
+ }
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ // Signed 16-bit immediate
+ if (SignedValue > 32767 || SignedValue < -32768)
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ // Low two bits are not encoded (4-byte alignment assumed).
+ if (Value & 0x3)
+ Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
+ return (Value >> 2) & 0x3fff;
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ // Signed 28-bit immediate
+ if (SignedValue > 134217727 || SignedValue < -134217728)
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ // Low two bits are not encoded (4-byte alignment assumed).
+ if (Value & 0x3)
+ Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
+ return (Value >> 2) & 0x3ffffff;
+ case FK_NONE:
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
+ case FK_Data_8:
+ case FK_SecRel_2:
+ case FK_SecRel_4:
+ return Value;
+ }
+}
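As one concrete case, fixup_aarch64_pcrel_branch26 reduces to a range check plus dropping the two alignment bits. A standalone mirror of just that arithmetic (hypothetical helper, not part of this file):

#include <cassert>
#include <cstdint>

uint32_t encodeBranch26(int64_t ByteOffset) {
  assert(ByteOffset <= 134217727 && ByteOffset >= -134217728 && "out of range");
  assert((ByteOffset & 0x3) == 0 && "not 4-byte aligned");
  return (uint64_t(ByteOffset) >> 2) & 0x3ffffff;
}

// encodeBranch26(8) == 2: a branch two instructions forward.
// encodeBranch26(-4) == 0x3ffffff: one instruction back; the sign bits are
// truncated into the 26-bit field.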
+
+Optional<MCFixupKind> AArch64AsmBackend::getFixupKind(StringRef Name) const {
+ if (TheTriple.isOSBinFormatELF() && Name == "R_AARCH64_NONE")
+ return FK_NONE;
+ return MCAsmBackend::getFixupKind(Name);
+}
+
+/// getFixupKindContainerSizeInBytes - The number of bytes of the container
+/// involved in a big-endian fixup, or 0 if the item is little-endian.
+unsigned
+AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
+ if (Endian == support::little)
+ return 0;
+
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+
+ case FK_Data_1:
+ return 1;
+ case FK_Data_2:
+ return 2;
+ case FK_Data_4:
+ return 4;
+ case FK_Data_8:
+ return 8;
+
+ case AArch64::fixup_aarch64_tlsdesc_call:
+ case AArch64::fixup_aarch64_movw:
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ // Instructions are always little endian
+ return 0;
+ }
+}
+
+void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target,
+ MutableArrayRef<char> Data, uint64_t Value,
+ bool IsResolved,
+ const MCSubtargetInfo *STI) const {
+ unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
+ if (!Value)
+ return; // Doesn't change encoding.
+ MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
+ MCContext &Ctx = Asm.getContext();
+ int64_t SignedValue = static_cast<int64_t>(Value);
+ // Apply any target-specific value adjustments.
+ Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);
+
+ // Shift the value into position.
+ Value <<= Info.TargetOffset;
+
+ unsigned Offset = Fixup.getOffset();
+ assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+
+ // Used to point to big endian bytes.
+  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());
+
+ // For each byte of the fragment that the fixup touches, mask in the
+ // bits from the fixup value.
+  if (FullSizeInBytes == 0) {
+ // Handle as little-endian
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+ }
+ } else {
+ // Handle as big-endian
+    assert((Offset + FullSizeInBytes) <= Data.size() && "Invalid fixup size!");
+    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
+    for (unsigned i = 0; i != NumBytes; ++i) {
+      unsigned Idx = FullSizeInBytes - 1 - i;
+ Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
+ }
+ }
+
+ // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
+ // handle this more cleanly. This may affect the output of -show-mc-encoding.
+ AArch64MCExpr::VariantKind RefKind =
+ static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+ if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
+ // If the immediate is negative, generate MOVN else MOVZ.
+ // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
+ if (SignedValue < 0)
+ Data[Offset + 3] &= ~(1 << 6);
+ else
+ Data[Offset + 3] |= (1 << 6);
+ }
+}
+
+bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst,
+ const MCSubtargetInfo &STI) const {
+ return false;
+}
+
+bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const {
+ // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
+ // into the targets for now.
+ //
+ // Relax if the value is too big for a (signed) i8.
+ return int64_t(Value) != int64_t(int8_t(Value));
+}
+
+void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI,
+ MCInst &Res) const {
+ llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
+}
+
+bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
+ // If the count is not 4-byte aligned, we must be writing data into the text
+ // section (otherwise we have unaligned instructions, and thus have far
+ // bigger problems), so just write zeros instead.
+ OS.write_zeros(Count % 4);
+
+ // We are properly aligned, so write NOPs as requested.
+ Count /= 4;
+ for (uint64_t i = 0; i != Count; ++i)
+ support::endian::write<uint32_t>(OS, 0xd503201f, Endian);
+ return true;
+}
+
+bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
+ const MCFixup &Fixup,
+ const MCValue &Target) {
+ unsigned Kind = Fixup.getKind();
+ if (Kind == FK_NONE)
+ return true;
+
+ // The ADRP instruction adds some multiple of 0x1000 to the current PC &
+ // ~0xfff. This means that the required offset to reach a symbol can vary by
+ // up to one step depending on where the ADRP is in memory. For example:
+ //
+ // ADRP x0, there
+ // there:
+ //
+ // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
+ // we'll need that as an offset. At any other address "there" will be in the
+ // same page as the ADRP and the instruction should encode 0x0. Assuming the
+ // section isn't 0x1000-aligned, we therefore need to delegate this decision
+ // to the linker -- a relocation!
+ if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
+ return true;
+
+ AArch64MCExpr::VariantKind RefKind =
+ static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+ AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
+ // LDR GOT relocations need a relocation
+ if (Kind == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
+ SymLoc == AArch64MCExpr::VK_GOT)
+ return true;
+ return false;
+}
+
+namespace {
+
+namespace CU {
+
+/// Compact unwind encoding values.
+enum CompactUnwindEncodings {
+ /// A "frameless" leaf function, where no non-volatile registers are
+  /// saved. The return address remains in LR throughout the function.
+ UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,
+
+  /// No compact unwind encoding available. Instead the low 23 bits of
+  /// the compact unwind encoding are the offset of the DWARF FDE in the
+  /// __eh_frame section. This mode is never used in object files. It is only
+  /// generated by the linker in final linked images, which have only DWARF
+  /// info for a function.
+ UNWIND_ARM64_MODE_DWARF = 0x03000000,
+
+ /// This is a standard arm64 prologue where FP/LR are immediately
+ /// pushed on the stack, then SP is copied to FP. If there are any
+  /// non-volatile registers saved, they are copied into the stack frame in
+  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
+  /// of the five X pairs and four D pairs can be saved, but the memory layout
+  /// must be in register number order.
+ UNWIND_ARM64_MODE_FRAME = 0x04000000,
+
+ /// Frame register pair encodings.
+ UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
+ UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
+ UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
+ UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
+ UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
+ UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
+ UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
+ UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
+ UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
+};
+
+} // end CU namespace
+
+// FIXME: This should be in a separate file.
+class DarwinAArch64AsmBackend : public AArch64AsmBackend {
+ const MCRegisterInfo &MRI;
+ bool IsILP32;
+
+ /// Encode compact unwind stack adjustment for frameless functions.
+ /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
+ /// The stack size always needs to be 16 byte aligned.
+ uint32_t encodeStackAdjustment(uint32_t StackSize) const {
+ return (StackSize / 16) << 12;
+ }
+
+public:
+ DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
+ const MCRegisterInfo &MRI, bool IsILP32)
+ : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI),
+ IsILP32(IsILP32) {}
+
+ std::unique_ptr<MCObjectTargetWriter>
+ createObjectTargetWriter() const override {
+ if (IsILP32)
+ return createAArch64MachObjectWriter(
+ MachO::CPU_TYPE_ARM64_32, MachO::CPU_SUBTYPE_ARM64_32_V8, true);
+ else
+ return createAArch64MachObjectWriter(MachO::CPU_TYPE_ARM64,
+ MachO::CPU_SUBTYPE_ARM64_ALL, false);
+ }
+
+ /// Generate the compact unwind encoding from the CFI directives.
+ uint32_t generateCompactUnwindEncoding(
+ ArrayRef<MCCFIInstruction> Instrs) const override {
+ if (Instrs.empty())
+ return CU::UNWIND_ARM64_MODE_FRAMELESS;
+
+ bool HasFP = false;
+ unsigned StackSize = 0;
+
+ uint32_t CompactUnwindEncoding = 0;
+ for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
+ const MCCFIInstruction &Inst = Instrs[i];
+
+ switch (Inst.getOperation()) {
+ default:
+ // Cannot handle this directive: bail out.
+ return CU::UNWIND_ARM64_MODE_DWARF;
+ case MCCFIInstruction::OpDefCfa: {
+ // Defines a frame pointer.
+ unsigned XReg =
+ getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true));
+
+ // Other CFA registers than FP are not supported by compact unwind.
+ // Fallback on DWARF.
+ // FIXME: When opt-remarks are supported in MC, add a remark to notify
+ // the user.
+ if (XReg != AArch64::FP)
+ return CU::UNWIND_ARM64_MODE_DWARF;
+
+ assert(XReg == AArch64::FP && "Invalid frame pointer!");
+ assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");
+
+ const MCCFIInstruction &LRPush = Instrs[++i];
+ assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
+ "Link register not pushed!");
+ const MCCFIInstruction &FPPush = Instrs[++i];
+ assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
+ "Frame pointer not pushed!");
+
+ unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
+ unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);
+
+ LRReg = getXRegFromWReg(LRReg);
+ FPReg = getXRegFromWReg(FPReg);
+
+ assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
+ "Pushing invalid registers for frame!");
+
+ // Indicate that the function has a frame.
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
+ HasFP = true;
+ break;
+ }
+ case MCCFIInstruction::OpDefCfaOffset: {
+ assert(StackSize == 0 && "We already have the CFA offset!");
+ StackSize = std::abs(Inst.getOffset());
+ break;
+ }
+ case MCCFIInstruction::OpOffset: {
+ // Registers are saved in pairs. We expect there to be two consecutive
+ // `.cfi_offset' instructions with the appropriate registers specified.
+ unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
+ if (i + 1 == e)
+ return CU::UNWIND_ARM64_MODE_DWARF;
+
+ const MCCFIInstruction &Inst2 = Instrs[++i];
+ if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
+ return CU::UNWIND_ARM64_MODE_DWARF;
+ unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);
+
+ // N.B. The encodings must be in register number order, and the X
+ // registers before the D registers.
+
+ // X19/X20 pair = 0x00000001,
+ // X21/X22 pair = 0x00000002,
+ // X23/X24 pair = 0x00000004,
+ // X25/X26 pair = 0x00000008,
+ // X27/X28 pair = 0x00000010
+ Reg1 = getXRegFromWReg(Reg1);
+ Reg2 = getXRegFromWReg(Reg2);
+
+ if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
+ (CompactUnwindEncoding & 0xF1E) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
+ else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
+ (CompactUnwindEncoding & 0xF1C) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
+ else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
+ (CompactUnwindEncoding & 0xF18) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
+ else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
+ (CompactUnwindEncoding & 0xF10) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
+ else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
+ (CompactUnwindEncoding & 0xF00) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
+ else {
+ Reg1 = getDRegFromBReg(Reg1);
+ Reg2 = getDRegFromBReg(Reg2);
+
+ // D8/D9 pair = 0x00000100,
+ // D10/D11 pair = 0x00000200,
+ // D12/D13 pair = 0x00000400,
+ // D14/D15 pair = 0x00000800
+ if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
+ (CompactUnwindEncoding & 0xE00) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
+ else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
+ (CompactUnwindEncoding & 0xC00) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
+ else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
+ (CompactUnwindEncoding & 0x800) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
+ else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
+ else
+ // A pair was pushed which we cannot handle.
+ return CU::UNWIND_ARM64_MODE_DWARF;
+ }
+
+ break;
+ }
+ }
+ }
+
+ if (!HasFP) {
+ // With compact unwind info we can only represent stack adjustments of up
+ // to 65520 bytes.
+ if (StackSize > 65520)
+ return CU::UNWIND_ARM64_MODE_DWARF;
+
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
+ CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
+ }
+
+ return CompactUnwindEncoding;
+ }
+};
+
+} // end anonymous namespace
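The resulting compact unwind word is a plain OR of a mode and pair bits, plus the shifted stack size in the frameless case. Two worked values (sketch using the CU constants above, which are local to this file):

#include <cassert>
#include <cstdint>

void compactUnwindExamples() {
  // Standard frame saving X19/X20 and D8/D9:
  uint32_t Frame = 0x04000000 | 0x00000001 | 0x00000100; // FRAME | X19_X20 | D8_D9
  assert(Frame == 0x04000101);
  // Frameless function with a 64-byte stack: (64 / 16) << 12.
  uint32_t Frameless = 0x02000000 | (64 / 16) << 12;
  assert(Frameless == 0x02004000);
}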
+
+namespace {
+
+class ELFAArch64AsmBackend : public AArch64AsmBackend {
+public:
+ uint8_t OSABI;
+ bool IsILP32;
+
+ ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
+ bool IsLittleEndian, bool IsILP32)
+ : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
+ IsILP32(IsILP32) {}
+
+ std::unique_ptr<MCObjectTargetWriter>
+ createObjectTargetWriter() const override {
+ return createAArch64ELFObjectWriter(OSABI, IsILP32);
+ }
+};
+
+} // end anonymous namespace
+
+namespace {
+class COFFAArch64AsmBackend : public AArch64AsmBackend {
+public:
+ COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
+ : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}
+
+ std::unique_ptr<MCObjectTargetWriter>
+ createObjectTargetWriter() const override {
+ return createAArch64WinCOFFObjectWriter();
+ }
+};
+} // end anonymous namespace
+
+MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
+ const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
+ const MCTargetOptions &Options) {
+ const Triple &TheTriple = STI.getTargetTriple();
+ if (TheTriple.isOSBinFormatMachO()) {
+ const bool IsILP32 = TheTriple.isArch32Bit();
+ return new DarwinAArch64AsmBackend(T, TheTriple, MRI, IsILP32);
+ }
+
+ if (TheTriple.isOSBinFormatCOFF())
+ return new COFFAArch64AsmBackend(T, TheTriple);
+
+ assert(TheTriple.isOSBinFormatELF() && "Invalid target");
+
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
+ bool IsILP32 = Options.getABIName() == "ilp32";
+ return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
+ IsILP32);
+}
+
+MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
+ const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
+ const MCTargetOptions &Options) {
+ const Triple &TheTriple = STI.getTargetTriple();
+ assert(TheTriple.isOSBinFormatELF() &&
+ "Big endian is only supported for ELF targets!");
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
+ bool IsILP32 = Options.getABIName() == "ilp32";
+ return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
+ IsILP32);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
new file mode 100644
index 000000000000..c871e2c62eac
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -0,0 +1,438 @@
+//===-- AArch64ELFObjectWriter.cpp - AArch64 ELF Writer -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file handles ELF-specific object emission, converting LLVM's internal
+// fixups into the appropriate relocations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCExpr.h"
+#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+
+using namespace llvm;
+
+namespace {
+
+class AArch64ELFObjectWriter : public MCELFObjectTargetWriter {
+public:
+ AArch64ELFObjectWriter(uint8_t OSABI, bool IsILP32);
+
+ ~AArch64ELFObjectWriter() override = default;
+
+protected:
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
+ bool IsILP32;
+};
+
+} // end anonymous namespace
+
+AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI, bool IsILP32)
+ : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_AARCH64,
+ /*HasRelocationAddend*/ true),
+ IsILP32(IsILP32) {}
+
+#define R_CLS(rtype) \
+ IsILP32 ? ELF::R_AARCH64_P32_##rtype : ELF::R_AARCH64_##rtype
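+// For example, R_CLS(ABS32) expands to ELF::R_AARCH64_P32_ABS32 under ILP32
+// and to ELF::R_AARCH64_ABS32 under LP64.
+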
+#define BAD_ILP32_MOV(lp64rtype) \
+ "ILP32 absolute MOV relocation not " \
+ "supported (LP64 eqv: " #lp64rtype ")"
+
+// assumes IsILP32 is true
+static bool isNonILP32reloc(const MCFixup &Fixup,
+ AArch64MCExpr::VariantKind RefKind,
+ MCContext &Ctx) {
+ if ((unsigned)Fixup.getKind() != AArch64::fixup_aarch64_movw)
+ return false;
+ switch (RefKind) {
+ case AArch64MCExpr::VK_ABS_G3:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_UABS_G3));
+ return true;
+ case AArch64MCExpr::VK_ABS_G2:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_UABS_G2));
+ return true;
+ case AArch64MCExpr::VK_ABS_G2_S:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_SABS_G2));
+ return true;
+ case AArch64MCExpr::VK_ABS_G2_NC:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_UABS_G2_NC));
+ return true;
+ case AArch64MCExpr::VK_ABS_G1_S:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_SABS_G1));
+ return true;
+ case AArch64MCExpr::VK_ABS_G1_NC:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_UABS_G1_NC));
+ return true;
+ case AArch64MCExpr::VK_DTPREL_G2:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSLD_MOVW_DTPREL_G2));
+ return true;
+ case AArch64MCExpr::VK_DTPREL_G1_NC:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSLD_MOVW_DTPREL_G1_NC));
+ return true;
+ case AArch64MCExpr::VK_TPREL_G2:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSLE_MOVW_TPREL_G2));
+ return true;
+ case AArch64MCExpr::VK_TPREL_G1_NC:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSLE_MOVW_TPREL_G1_NC));
+ return true;
+ case AArch64MCExpr::VK_GOTTPREL_G1:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSIE_MOVW_GOTTPREL_G1));
+ return true;
+ case AArch64MCExpr::VK_GOTTPREL_G0_NC:
+ Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSIE_MOVW_GOTTPREL_G0_NC));
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ AArch64MCExpr::VariantKind RefKind =
+ static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+ AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
+ bool IsNC = AArch64MCExpr::isNotChecked(RefKind);
+
+ assert((!Target.getSymA() ||
+ Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None) &&
+ "Should only be expression-level modifiers here");
+
+ assert((!Target.getSymB() ||
+ Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None) &&
+ "Should only be expression-level modifiers here");
+
+ if (IsPCRel) {
+ switch ((unsigned)Fixup.getKind()) {
+ case FK_Data_1:
+ Ctx.reportError(Fixup.getLoc(), "1-byte data relocations not supported");
+ return ELF::R_AARCH64_NONE;
+ case FK_Data_2:
+ return R_CLS(PREL16);
+ case FK_Data_4:
+ return R_CLS(PREL32);
+ case FK_Data_8:
+ if (IsILP32) {
+ Ctx.reportError(Fixup.getLoc(),
+ "ILP32 8 byte PC relative data "
+ "relocation not supported (LP64 eqv: PREL64)");
+ return ELF::R_AARCH64_NONE;
+ } else
+ return ELF::R_AARCH64_PREL64;
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ if (SymLoc != AArch64MCExpr::VK_ABS)
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid symbol kind for ADR relocation");
+ return R_CLS(ADR_PREL_LO21);
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ if (SymLoc == AArch64MCExpr::VK_ABS && !IsNC)
+ return R_CLS(ADR_PREL_PG_HI21);
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC) {
+ if (IsILP32) {
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 32-bit pcrel ADRP instruction "
+ "VK_ABS VK_NC");
+ return ELF::R_AARCH64_NONE;
+ } else {
+ return ELF::R_AARCH64_ADR_PREL_PG_HI21_NC;
+ }
+ }
+ if (SymLoc == AArch64MCExpr::VK_GOT && !IsNC)
+ return R_CLS(ADR_GOT_PAGE);
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL && !IsNC)
+ return R_CLS(TLSIE_ADR_GOTTPREL_PAGE21);
+ if (SymLoc == AArch64MCExpr::VK_TLSDESC && !IsNC)
+ return R_CLS(TLSDESC_ADR_PAGE21);
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid symbol kind for ADRP relocation");
+ return ELF::R_AARCH64_NONE;
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ return R_CLS(JUMP26);
+ case AArch64::fixup_aarch64_pcrel_call26:
+ return R_CLS(CALL26);
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL)
+ return R_CLS(TLSIE_LD_GOTTPREL_PREL19);
+ if (SymLoc == AArch64MCExpr::VK_GOT)
+ return R_CLS(GOT_LD_PREL19);
+ return R_CLS(LD_PREL_LO19);
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ return R_CLS(TSTBR14);
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ return R_CLS(CONDBR19);
+ default:
+ Ctx.reportError(Fixup.getLoc(), "Unsupported pc-relative fixup kind");
+ return ELF::R_AARCH64_NONE;
+ }
+ } else {
+ if (IsILP32 && isNonILP32reloc(Fixup, RefKind, Ctx))
+ return ELF::R_AARCH64_NONE;
+ switch ((unsigned)Fixup.getKind()) {
+ case FK_NONE:
+ return ELF::R_AARCH64_NONE;
+ case FK_Data_1:
+ Ctx.reportError(Fixup.getLoc(), "1-byte data relocations not supported");
+ return ELF::R_AARCH64_NONE;
+ case FK_Data_2:
+ return R_CLS(ABS16);
+ case FK_Data_4:
+ return R_CLS(ABS32);
+ case FK_Data_8:
+ if (IsILP32) {
+ Ctx.reportError(Fixup.getLoc(),
+ "ILP32 8 byte absolute data "
+ "relocation not supported (LP64 eqv: ABS64)");
+ return ELF::R_AARCH64_NONE;
+ } else
+ return ELF::R_AARCH64_ABS64;
+ case AArch64::fixup_aarch64_add_imm12:
+ if (RefKind == AArch64MCExpr::VK_DTPREL_HI12)
+ return R_CLS(TLSLD_ADD_DTPREL_HI12);
+ if (RefKind == AArch64MCExpr::VK_TPREL_HI12)
+ return R_CLS(TLSLE_ADD_TPREL_HI12);
+ if (RefKind == AArch64MCExpr::VK_DTPREL_LO12_NC)
+ return R_CLS(TLSLD_ADD_DTPREL_LO12_NC);
+ if (RefKind == AArch64MCExpr::VK_DTPREL_LO12)
+ return R_CLS(TLSLD_ADD_DTPREL_LO12);
+ if (RefKind == AArch64MCExpr::VK_TPREL_LO12_NC)
+ return R_CLS(TLSLE_ADD_TPREL_LO12_NC);
+ if (RefKind == AArch64MCExpr::VK_TPREL_LO12)
+ return R_CLS(TLSLE_ADD_TPREL_LO12);
+ if (RefKind == AArch64MCExpr::VK_TLSDESC_LO12)
+ return R_CLS(TLSDESC_ADD_LO12);
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return R_CLS(ADD_ABS_LO12_NC);
+
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for add (uimm12) instruction");
+ return ELF::R_AARCH64_NONE;
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return R_CLS(LDST8_ABS_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return R_CLS(TLSLD_LDST8_DTPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return R_CLS(TLSLD_LDST8_DTPREL_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return R_CLS(TLSLE_LDST8_TPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return R_CLS(TLSLE_LDST8_TPREL_LO12_NC);
+
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 8-bit load/store instruction");
+ return ELF::R_AARCH64_NONE;
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return R_CLS(LDST16_ABS_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return R_CLS(TLSLD_LDST16_DTPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return R_CLS(TLSLD_LDST16_DTPREL_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return R_CLS(TLSLE_LDST16_TPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return R_CLS(TLSLE_LDST16_TPREL_LO12_NC);
+
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 16-bit load/store instruction");
+ return ELF::R_AARCH64_NONE;
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return R_CLS(LDST32_ABS_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return R_CLS(TLSLD_LDST32_DTPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return R_CLS(TLSLD_LDST32_DTPREL_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return R_CLS(TLSLE_LDST32_TPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return R_CLS(TLSLE_LDST32_TPREL_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_GOT && IsNC) {
+ if (IsILP32) {
+ return ELF::R_AARCH64_P32_LD32_GOT_LO12_NC;
+ } else {
+ Ctx.reportError(Fixup.getLoc(),
+ "LP64 4 byte unchecked GOT load/store relocation "
+ "not supported (ILP32 eqv: LD32_GOT_LO12_NC");
+ return ELF::R_AARCH64_NONE;
+ }
+ }
+ if (SymLoc == AArch64MCExpr::VK_GOT && !IsNC) {
+ if (IsILP32) {
+ Ctx.reportError(Fixup.getLoc(),
+ "ILP32 4 byte checked GOT load/store relocation "
+ "not supported (unchecked eqv: LD32_GOT_LO12_NC)");
+ } else {
+ Ctx.reportError(Fixup.getLoc(),
+ "LP64 4 byte checked GOT load/store relocation "
+ "not supported (unchecked/ILP32 eqv: "
+ "LD32_GOT_LO12_NC)");
+ }
+ return ELF::R_AARCH64_NONE;
+ }
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL && IsNC) {
+ if (IsILP32) {
+ return ELF::R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC;
+ } else {
+ Ctx.reportError(Fixup.getLoc(),
+ "LP64 32-bit load/store "
+ "relocation not supported (ILP32 eqv: "
+ "TLSIE_LD32_GOTTPREL_LO12_NC)");
+ return ELF::R_AARCH64_NONE;
+ }
+ }
+ if (SymLoc == AArch64MCExpr::VK_TLSDESC && !IsNC) {
+ if (IsILP32) {
+ return ELF::R_AARCH64_P32_TLSDESC_LD32_LO12;
+ } else {
+ Ctx.reportError(Fixup.getLoc(),
+ "LP64 4 byte TLSDESC load/store relocation "
+ "not supported (ILP32 eqv: TLSDESC_LD64_LO12)");
+ return ELF::R_AARCH64_NONE;
+ }
+ }
+
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 32-bit load/store instruction "
+ "fixup_aarch64_ldst_imm12_scale4");
+ return ELF::R_AARCH64_NONE;
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return R_CLS(LDST64_ABS_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_GOT && IsNC) {
+ if (!IsILP32) {
+ return ELF::R_AARCH64_LD64_GOT_LO12_NC;
+ } else {
+ Ctx.reportError(Fixup.getLoc(), "ILP32 64-bit load/store "
+ "relocation not supported (LP64 eqv: "
+ "LD64_GOT_LO12_NC)");
+ return ELF::R_AARCH64_NONE;
+ }
+ }
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return R_CLS(TLSLD_LDST64_DTPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return R_CLS(TLSLD_LDST64_DTPREL_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return R_CLS(TLSLE_LDST64_TPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return R_CLS(TLSLE_LDST64_TPREL_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL && IsNC) {
+ if (!IsILP32) {
+ return ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
+ } else {
+ Ctx.reportError(Fixup.getLoc(), "ILP32 64-bit load/store "
+ "relocation not supported (LP64 eqv: "
+ "TLSIE_LD64_GOTTPREL_LO12_NC)");
+ return ELF::R_AARCH64_NONE;
+ }
+ }
+ if (SymLoc == AArch64MCExpr::VK_TLSDESC) {
+ if (!IsILP32) {
+ return ELF::R_AARCH64_TLSDESC_LD64_LO12;
+ } else {
+ Ctx.reportError(Fixup.getLoc(), "ILP32 64-bit load/store "
+ "relocation not supported (LP64 eqv: "
+ "TLSDESC_LD64_LO12)");
+ return ELF::R_AARCH64_NONE;
+ }
+ }
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 64-bit load/store instruction");
+ return ELF::R_AARCH64_NONE;
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return R_CLS(LDST128_ABS_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return R_CLS(TLSLD_LDST128_DTPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return R_CLS(TLSLD_LDST128_DTPREL_LO12_NC);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return R_CLS(TLSLE_LDST128_TPREL_LO12);
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return R_CLS(TLSLE_LDST128_TPREL_LO12_NC);
+
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 128-bit load/store instruction");
+ return ELF::R_AARCH64_NONE;
+    // Under ILP32, the MOV-wide variants with no 32-bit equivalent never
+    // reach here; they were already rejected by isNonILP32reloc above.
+ case AArch64::fixup_aarch64_movw:
+ if (RefKind == AArch64MCExpr::VK_ABS_G3)
+ return ELF::R_AARCH64_MOVW_UABS_G3;
+ if (RefKind == AArch64MCExpr::VK_ABS_G2)
+ return ELF::R_AARCH64_MOVW_UABS_G2;
+ if (RefKind == AArch64MCExpr::VK_ABS_G2_S)
+ return ELF::R_AARCH64_MOVW_SABS_G2;
+ if (RefKind == AArch64MCExpr::VK_ABS_G2_NC)
+ return ELF::R_AARCH64_MOVW_UABS_G2_NC;
+ if (RefKind == AArch64MCExpr::VK_ABS_G1)
+ return R_CLS(MOVW_UABS_G1);
+ if (RefKind == AArch64MCExpr::VK_ABS_G1_S)
+ return ELF::R_AARCH64_MOVW_SABS_G1;
+ if (RefKind == AArch64MCExpr::VK_ABS_G1_NC)
+ return ELF::R_AARCH64_MOVW_UABS_G1_NC;
+ if (RefKind == AArch64MCExpr::VK_ABS_G0)
+ return R_CLS(MOVW_UABS_G0);
+ if (RefKind == AArch64MCExpr::VK_ABS_G0_S)
+ return R_CLS(MOVW_SABS_G0);
+ if (RefKind == AArch64MCExpr::VK_ABS_G0_NC)
+ return R_CLS(MOVW_UABS_G0_NC);
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G2)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G2;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G1)
+ return R_CLS(TLSLD_MOVW_DTPREL_G1);
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G1_NC)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G0)
+ return R_CLS(TLSLD_MOVW_DTPREL_G0);
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G0_NC)
+ return R_CLS(TLSLD_MOVW_DTPREL_G0_NC);
+ if (RefKind == AArch64MCExpr::VK_TPREL_G2)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G2;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G1)
+ return R_CLS(TLSLE_MOVW_TPREL_G1);
+ if (RefKind == AArch64MCExpr::VK_TPREL_G1_NC)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G0)
+ return R_CLS(TLSLE_MOVW_TPREL_G0);
+ if (RefKind == AArch64MCExpr::VK_TPREL_G0_NC)
+ return R_CLS(TLSLE_MOVW_TPREL_G0_NC);
+ if (RefKind == AArch64MCExpr::VK_GOTTPREL_G1)
+ return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
+ if (RefKind == AArch64MCExpr::VK_GOTTPREL_G0_NC)
+ return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for movz/movk instruction");
+ return ELF::R_AARCH64_NONE;
+ case AArch64::fixup_aarch64_tlsdesc_call:
+ return R_CLS(TLSDESC_CALL);
+ default:
+ Ctx.reportError(Fixup.getLoc(), "Unknown ELF relocation type");
+ return ELF::R_AARCH64_NONE;
+ }
+ }
+
+ llvm_unreachable("Unimplemented fixup -> relocation");
+}
+
+std::unique_ptr<MCObjectTargetWriter>
+llvm::createAArch64ELFObjectWriter(uint8_t OSABI, bool IsILP32) {
+ return llvm::make_unique<AArch64ELFObjectWriter>(OSABI, IsILP32);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
new file mode 100644
index 000000000000..c33f7e957b54
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -0,0 +1,216 @@
+//===- lib/MC/AArch64ELFStreamer.cpp - ELF Object Output for AArch64 ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file assembles .s files and emits AArch64 ELF .o object files. It
+// differs from the generic ELF streamer in that it emits mapping symbols ($x
+// and $d) to delimit regions of data and code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64TargetStreamer.h"
+#include "AArch64WinCOFFStreamer.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCELFStreamer.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/MCWinCOFFStreamer.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+
+class AArch64ELFStreamer;
+
+class AArch64TargetAsmStreamer : public AArch64TargetStreamer {
+ formatted_raw_ostream &OS;
+
+ void emitInst(uint32_t Inst) override;
+
+public:
+ AArch64TargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
+};
+
+AArch64TargetAsmStreamer::AArch64TargetAsmStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS)
+ : AArch64TargetStreamer(S), OS(OS) {}
+
+void AArch64TargetAsmStreamer::emitInst(uint32_t Inst) {
+ OS << "\t.inst\t0x" << Twine::utohexstr(Inst) << "\n";
+}
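+
+// For example, emitInst(0xd503201f) (the A64 NOP encoding) prints the line
+// "\t.inst\t0xd503201f".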
+
+/// Extend the generic ELFStreamer class so that it can emit mapping symbols at
+/// the appropriate points in the object files. These symbols are defined in the
+/// AArch64 ELF ABI:
+/// infocenter.arm.com/help/topic/com.arm.doc.ihi0056a/IHI0056A_aaelf64.pdf
+///
+/// In brief: $x or $d should be emitted at the start of each contiguous region
+/// of A64 code or data in a section. In practice, this emission does not rely
+/// on explicit assembler directives but on inherent properties of the
+/// directives doing the emission (e.g. ".byte" is data, "add x0, x0, x0" an
+/// instruction).
+///
+/// As a result this system is orthogonal to the DataRegion infrastructure used
+/// by MachO. Beware!
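+///
+/// For example, assembling
+///     add x0, x0, x1    // "$x" is emitted before this (code starts)
+///     .word 42          // "$d" is emitted before this (data starts)
+///     add x2, x2, x3    // "$x" is emitted again (back to code)
+/// yields three mapping symbols delimiting the code and data regions.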
+class AArch64ELFStreamer : public MCELFStreamer {
+public:
+ AArch64ELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
+ std::unique_ptr<MCObjectWriter> OW,
+ std::unique_ptr<MCCodeEmitter> Emitter)
+ : MCELFStreamer(Context, std::move(TAB), std::move(OW),
+ std::move(Emitter)),
+ MappingSymbolCounter(0), LastEMS(EMS_None) {}
+
+ void ChangeSection(MCSection *Section, const MCExpr *Subsection) override {
+    // We have to keep track of the mapping symbol state of any sections we
+    // use. Each one should start off as EMS_None, which is what
+    // DenseMap::lookup returns for sections it has not seen yet.
+ LastMappingSymbols[getPreviousSection().first] = LastEMS;
+ LastEMS = LastMappingSymbols.lookup(Section);
+
+ MCELFStreamer::ChangeSection(Section, Subsection);
+ }
+
+ // Reset state between object emissions
+ void reset() override {
+ MappingSymbolCounter = 0;
+ MCELFStreamer::reset();
+ LastMappingSymbols.clear();
+ LastEMS = EMS_None;
+ }
+
+ /// This function is the one used to emit instruction data into the ELF
+ /// streamer. We override it to add the appropriate mapping symbol if
+ /// necessary.
+ void EmitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI) override {
+ EmitA64MappingSymbol();
+ MCELFStreamer::EmitInstruction(Inst, STI);
+ }
+
+  /// Emit a 32-bit value as an instruction. This is only used for the .inst
+  /// directive; EmitInstruction should be used in other cases.
+ void emitInst(uint32_t Inst) {
+ char Buffer[4];
+
+ // We can't just use EmitIntValue here, as that will emit a data mapping
+ // symbol, and swap the endianness on big-endian systems (instructions are
+ // always little-endian).
+ for (unsigned I = 0; I < 4; ++I) {
+ Buffer[I] = uint8_t(Inst);
+ Inst >>= 8;
+ }
+
+ EmitA64MappingSymbol();
+ MCELFStreamer::EmitBytes(StringRef(Buffer, 4));
+ }
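+
+  // For example, emitInst(0xd503201f) (a NOP) always emits the byte sequence
+  // 1f 20 03 d5, even on a big-endian target, since A64 instructions are
+  // always stored little-endian.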
+
+ /// This is one of the functions used to emit data into an ELF section, so the
+ /// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
+ /// if necessary.
+ void EmitBytes(StringRef Data) override {
+ EmitDataMappingSymbol();
+ MCELFStreamer::EmitBytes(Data);
+ }
+
+ /// This is one of the functions used to emit data into an ELF section, so the
+ /// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
+ /// if necessary.
+ void EmitValueImpl(const MCExpr *Value, unsigned Size, SMLoc Loc) override {
+ EmitDataMappingSymbol();
+ MCELFStreamer::EmitValueImpl(Value, Size, Loc);
+ }
+
+ void emitFill(const MCExpr &NumBytes, uint64_t FillValue,
+ SMLoc Loc) override {
+ EmitDataMappingSymbol();
+ MCObjectStreamer::emitFill(NumBytes, FillValue, Loc);
+ }
+private:
+ enum ElfMappingSymbol {
+ EMS_None,
+ EMS_A64,
+ EMS_Data
+ };
+
+ void EmitDataMappingSymbol() {
+ if (LastEMS == EMS_Data)
+ return;
+ EmitMappingSymbol("$d");
+ LastEMS = EMS_Data;
+ }
+
+ void EmitA64MappingSymbol() {
+ if (LastEMS == EMS_A64)
+ return;
+ EmitMappingSymbol("$x");
+ LastEMS = EMS_A64;
+ }
+
+ void EmitMappingSymbol(StringRef Name) {
+ auto *Symbol = cast<MCSymbolELF>(getContext().getOrCreateSymbol(
+ Name + "." + Twine(MappingSymbolCounter++)));
+ EmitLabel(Symbol);
+ Symbol->setType(ELF::STT_NOTYPE);
+ Symbol->setBinding(ELF::STB_LOCAL);
+ Symbol->setExternal(false);
+ }
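+
+  // Successive calls yield uniquely named local symbols: "$x.0", "$d.1",
+  // "$x.2", and so on.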
+
+ int64_t MappingSymbolCounter;
+
+ DenseMap<const MCSection *, ElfMappingSymbol> LastMappingSymbols;
+ ElfMappingSymbol LastEMS;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+
+AArch64ELFStreamer &AArch64TargetELFStreamer::getStreamer() {
+ return static_cast<AArch64ELFStreamer &>(Streamer);
+}
+
+void AArch64TargetELFStreamer::emitInst(uint32_t Inst) {
+ getStreamer().emitInst(Inst);
+}
+
+MCTargetStreamer *createAArch64AsmTargetStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS,
+ MCInstPrinter *InstPrint,
+ bool isVerboseAsm) {
+ return new AArch64TargetAsmStreamer(S, OS);
+}
+
+MCELFStreamer *createAArch64ELFStreamer(MCContext &Context,
+ std::unique_ptr<MCAsmBackend> TAB,
+ std::unique_ptr<MCObjectWriter> OW,
+ std::unique_ptr<MCCodeEmitter> Emitter,
+ bool RelaxAll) {
+ AArch64ELFStreamer *S = new AArch64ELFStreamer(
+ Context, std::move(TAB), std::move(OW), std::move(Emitter));
+ if (RelaxAll)
+ S->getAssembler().setRelaxAll(true);
+ return S;
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h
new file mode 100644
index 000000000000..25c609ee1496
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h
@@ -0,0 +1,27 @@
+//===-- AArch64ELFStreamer.h - ELF Streamer for AArch64 ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ELF streamer information for the AArch64 backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ELFSTREAMER_H
+#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ELFSTREAMER_H
+
+#include "llvm/MC/MCELFStreamer.h"
+
+namespace llvm {
+
+MCELFStreamer *createAArch64ELFStreamer(MCContext &Context,
+ std::unique_ptr<MCAsmBackend> TAB,
+ std::unique_ptr<MCObjectWriter> OW,
+ std::unique_ptr<MCCodeEmitter> Emitter,
+ bool RelaxAll);
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
new file mode 100644
index 000000000000..fe8043fe5ec0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
@@ -0,0 +1,69 @@
+//===-- AArch64FixupKinds.h - AArch64 Specific Fixup Entries ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64FIXUPKINDS_H
+#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64FIXUPKINDS_H
+
+#include "llvm/MC/MCFixup.h"
+
+namespace llvm {
+namespace AArch64 {
+
+enum Fixups {
+ // A 21-bit pc-relative immediate inserted into an ADR instruction.
+ fixup_aarch64_pcrel_adr_imm21 = FirstTargetFixupKind,
+
+ // A 21-bit pc-relative immediate inserted into an ADRP instruction.
+ fixup_aarch64_pcrel_adrp_imm21,
+
+ // 12-bit fixup for add/sub instructions. No alignment adjustment. All value
+ // bits are encoded.
+ fixup_aarch64_add_imm12,
+
+  // Unsigned 12-bit fixups for load and store instructions.
+ fixup_aarch64_ldst_imm12_scale1,
+ fixup_aarch64_ldst_imm12_scale2,
+ fixup_aarch64_ldst_imm12_scale4,
+ fixup_aarch64_ldst_imm12_scale8,
+ fixup_aarch64_ldst_imm12_scale16,
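+
+  // The stored immediate is the byte offset divided by the access size; for
+  // example, a byte offset of 24 on an 8-byte load/store is encoded as
+  // imm12 == 3 via the scale8 fixup.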
+
+ // The high 19 bits of a 21-bit pc-relative immediate. Same encoding as
+ // fixup_aarch64_pcrel_adrhi, except this is used by pc-relative loads and
+ // generates relocations directly when necessary.
+ fixup_aarch64_ldr_pcrel_imm19,
+
+  // The 16-bit immediate of a wide move (MOVZ/MOVN/MOVK) instruction.
+ fixup_aarch64_movw,
+
+ // The high 14 bits of a 21-bit pc-relative immediate.
+ fixup_aarch64_pcrel_branch14,
+
+ // The high 19 bits of a 21-bit pc-relative immediate. Same encoding as
+  // fixup_aarch64_pcrel_adrhi, except this is used by b.cc and generates
+ // relocations directly when necessary.
+ fixup_aarch64_pcrel_branch19,
+
+ // The high 26 bits of a 28-bit pc-relative immediate.
+ fixup_aarch64_pcrel_branch26,
+
+ // The high 26 bits of a 28-bit pc-relative immediate. Distinguished from
+ // branch26 only on ELF.
+ fixup_aarch64_pcrel_call26,
+
+ // zero-space placeholder for the ELF R_AARCH64_TLSDESC_CALL relocation.
+ fixup_aarch64_tlsdesc_call,
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+
+} // end namespace AArch64
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
new file mode 100644
index 000000000000..d0a544273b8b
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
@@ -0,0 +1,1587 @@
+//==-- AArch64InstPrinter.cpp - Convert AArch64 MCInst to assembly syntax --==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints an AArch64 MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64InstPrinter.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "Utils/AArch64BaseInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+#define GET_INSTRUCTION_NAME
+#define PRINT_ALIAS_INSTR
+#include "AArch64GenAsmWriter.inc"
+#define GET_INSTRUCTION_NAME
+#define PRINT_ALIAS_INSTR
+#include "AArch64GenAsmWriter1.inc"
+
+AArch64InstPrinter::AArch64InstPrinter(const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
+AArch64AppleInstPrinter::AArch64AppleInstPrinter(const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : AArch64InstPrinter(MAI, MII, MRI) {}
+
+void AArch64InstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
+ // This is for .cfi directives.
+ OS << getRegisterName(RegNo);
+}
+
+void AArch64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
+ StringRef Annot,
+ const MCSubtargetInfo &STI) {
+ // Check for special encodings and print the canonical alias instead.
+
+ unsigned Opcode = MI->getOpcode();
+
+ if (Opcode == AArch64::SYSxt)
+ if (printSysAlias(MI, STI, O)) {
+ printAnnotation(O, Annot);
+ return;
+ }
+
+ // SBFM/UBFM should print to a nicer aliased form if possible.
+ if (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri ||
+ Opcode == AArch64::UBFMXri || Opcode == AArch64::UBFMWri) {
+ const MCOperand &Op0 = MI->getOperand(0);
+ const MCOperand &Op1 = MI->getOperand(1);
+ const MCOperand &Op2 = MI->getOperand(2);
+ const MCOperand &Op3 = MI->getOperand(3);
+
+ bool IsSigned = (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri);
+ bool Is64Bit = (Opcode == AArch64::SBFMXri || Opcode == AArch64::UBFMXri);
+ if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) {
+ const char *AsmMnemonic = nullptr;
+
+ switch (Op3.getImm()) {
+ default:
+ break;
+ case 7:
+ if (IsSigned)
+ AsmMnemonic = "sxtb";
+ else if (!Is64Bit)
+ AsmMnemonic = "uxtb";
+ break;
+ case 15:
+ if (IsSigned)
+ AsmMnemonic = "sxth";
+ else if (!Is64Bit)
+ AsmMnemonic = "uxth";
+ break;
+ case 31:
+ // *xtw is only valid for signed 64-bit operations.
+ if (Is64Bit && IsSigned)
+ AsmMnemonic = "sxtw";
+ break;
+ }
+
+ if (AsmMnemonic) {
+ O << '\t' << AsmMnemonic << '\t' << getRegisterName(Op0.getReg())
+ << ", " << getRegisterName(getWRegFromXReg(Op1.getReg()));
+ printAnnotation(O, Annot);
+ return;
+ }
+ }
+
+    // All immediate shifts are aliases, implemented using the Bitfield
+    // instruction. In all cases the immediate shift amount must be in the
+    // range 0 to (reg.size - 1).
+ if (Op2.isImm() && Op3.isImm()) {
+ const char *AsmMnemonic = nullptr;
+ int shift = 0;
+ int64_t immr = Op2.getImm();
+ int64_t imms = Op3.getImm();
+ if (Opcode == AArch64::UBFMWri && imms != 0x1F && ((imms + 1) == immr)) {
+ AsmMnemonic = "lsl";
+ shift = 31 - imms;
+ } else if (Opcode == AArch64::UBFMXri && imms != 0x3f &&
+ ((imms + 1 == immr))) {
+ AsmMnemonic = "lsl";
+ shift = 63 - imms;
+ } else if (Opcode == AArch64::UBFMWri && imms == 0x1f) {
+ AsmMnemonic = "lsr";
+ shift = immr;
+ } else if (Opcode == AArch64::UBFMXri && imms == 0x3f) {
+ AsmMnemonic = "lsr";
+ shift = immr;
+ } else if (Opcode == AArch64::SBFMWri && imms == 0x1f) {
+ AsmMnemonic = "asr";
+ shift = immr;
+ } else if (Opcode == AArch64::SBFMXri && imms == 0x3f) {
+ AsmMnemonic = "asr";
+ shift = immr;
+ }
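+      // As a worked example, UBFMWri with immr == 28 and imms == 27 satisfies
+      // imms + 1 == immr, so "ubfm w0, w1, #28, #27" prints as
+      // "lsl w0, w1, #4" (shift == 31 - 27).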
+ if (AsmMnemonic) {
+ O << '\t' << AsmMnemonic << '\t' << getRegisterName(Op0.getReg())
+ << ", " << getRegisterName(Op1.getReg()) << ", #" << shift;
+ printAnnotation(O, Annot);
+ return;
+ }
+ }
+
+ // SBFIZ/UBFIZ aliases
+ if (Op2.getImm() > Op3.getImm()) {
+ O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t'
+ << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
+ << ", #" << (Is64Bit ? 64 : 32) - Op2.getImm() << ", #" << Op3.getImm() + 1;
+ printAnnotation(O, Annot);
+ return;
+ }
+
+ // Otherwise SBFX/UBFX is the preferred form
+ O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t'
+ << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
+ << ", #" << Op2.getImm() << ", #" << Op3.getImm() - Op2.getImm() + 1;
+ printAnnotation(O, Annot);
+ return;
+ }
+
+ if (Opcode == AArch64::BFMXri || Opcode == AArch64::BFMWri) {
+ const MCOperand &Op0 = MI->getOperand(0); // Op1 == Op0
+ const MCOperand &Op2 = MI->getOperand(2);
+ int ImmR = MI->getOperand(3).getImm();
+ int ImmS = MI->getOperand(4).getImm();
+
+ if ((Op2.getReg() == AArch64::WZR || Op2.getReg() == AArch64::XZR) &&
+ (ImmR == 0 || ImmS < ImmR)) {
+      // BFC takes precedence over its entire range, slightly differently from BFI.
+ int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
+ int LSB = (BitWidth - ImmR) % BitWidth;
+ int Width = ImmS + 1;
+
+ O << "\tbfc\t" << getRegisterName(Op0.getReg())
+ << ", #" << LSB << ", #" << Width;
+ printAnnotation(O, Annot);
+ return;
+ } else if (ImmS < ImmR) {
+ // BFI alias
+ int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
+ int LSB = (BitWidth - ImmR) % BitWidth;
+ int Width = ImmS + 1;
+
+ O << "\tbfi\t" << getRegisterName(Op0.getReg()) << ", "
+ << getRegisterName(Op2.getReg()) << ", #" << LSB << ", #" << Width;
+ printAnnotation(O, Annot);
+ return;
+ }
+
+ int LSB = ImmR;
+ int Width = ImmS - ImmR + 1;
+    // Otherwise BFXIL is the preferred form
+ O << "\tbfxil\t"
+ << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op2.getReg())
+ << ", #" << LSB << ", #" << Width;
+ printAnnotation(O, Annot);
+ return;
+ }
+
+ // Symbolic operands for MOVZ, MOVN and MOVK already imply a shift
+ // (e.g. :gottprel_g1: is always going to be "lsl #16") so it should not be
+ // printed.
+ if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi ||
+ Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
+ MI->getOperand(1).isExpr()) {
+ if (Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi)
+ O << "\tmovz\t";
+ else
+ O << "\tmovn\t";
+
+ O << getRegisterName(MI->getOperand(0).getReg()) << ", #";
+ MI->getOperand(1).getExpr()->print(O, &MAI);
+ return;
+ }
+
+ if ((Opcode == AArch64::MOVKXi || Opcode == AArch64::MOVKWi) &&
+ MI->getOperand(2).isExpr()) {
+ O << "\tmovk\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #";
+ MI->getOperand(2).getExpr()->print(O, &MAI);
+ return;
+ }
+
+ // MOVZ, MOVN and "ORR wzr, #imm" instructions are aliases for MOV, but their
+ // domains overlap so they need to be prioritized. The chain is "MOVZ lsl #0 >
+ // MOVZ lsl #N > MOVN lsl #0 > MOVN lsl #N > ORR". The highest instruction
+ // that can represent the move is the MOV alias, and the rest get printed
+ // normally.
+ if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi) &&
+ MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
+ int RegWidth = Opcode == AArch64::MOVZXi ? 64 : 32;
+ int Shift = MI->getOperand(2).getImm();
+ uint64_t Value = (uint64_t)MI->getOperand(1).getImm() << Shift;
+
+ if (AArch64_AM::isMOVZMovAlias(Value, Shift,
+ Opcode == AArch64::MOVZXi ? 64 : 32)) {
+ O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
+ << formatImm(SignExtend64(Value, RegWidth));
+ return;
+ }
+ }
+
+ if ((Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
+ MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
+ int RegWidth = Opcode == AArch64::MOVNXi ? 64 : 32;
+ int Shift = MI->getOperand(2).getImm();
+ uint64_t Value = ~((uint64_t)MI->getOperand(1).getImm() << Shift);
+ if (RegWidth == 32)
+ Value = Value & 0xffffffff;
+
+ if (AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth)) {
+ O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
+ << formatImm(SignExtend64(Value, RegWidth));
+ return;
+ }
+ }
+
+ if ((Opcode == AArch64::ORRXri || Opcode == AArch64::ORRWri) &&
+ (MI->getOperand(1).getReg() == AArch64::XZR ||
+ MI->getOperand(1).getReg() == AArch64::WZR) &&
+ MI->getOperand(2).isImm()) {
+ int RegWidth = Opcode == AArch64::ORRXri ? 64 : 32;
+ uint64_t Value = AArch64_AM::decodeLogicalImmediate(
+ MI->getOperand(2).getImm(), RegWidth);
+ if (!AArch64_AM::isAnyMOVWMovAlias(Value, RegWidth)) {
+ O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
+ << formatImm(SignExtend64(Value, RegWidth));
+ return;
+ }
+ }
+
+ if (Opcode == AArch64::CompilerBarrier) {
+ O << '\t' << MAI.getCommentString() << " COMPILER BARRIER";
+ printAnnotation(O, Annot);
+ return;
+ }
+
+  // The TSB instruction is specified with one operand, but 'csync' is not
+  // encoded, so printing treats it as a special case here:
+ if (Opcode == AArch64::TSB) {
+ O << "\ttsb\tcsync";
+ return;
+ }
+
+ if (!printAliasInstr(MI, STI, O))
+ printInstruction(MI, STI, O);
+
+ printAnnotation(O, Annot);
+
+ if (atomicBarrierDroppedOnZero(Opcode) &&
+ (MI->getOperand(0).getReg() == AArch64::XZR ||
+ MI->getOperand(0).getReg() == AArch64::WZR)) {
+ printAnnotation(O, "acquire semantics dropped since destination is zero");
+ }
+}
+
+static bool isTblTbxInstruction(unsigned Opcode, StringRef &Layout,
+ bool &IsTbx) {
+ switch (Opcode) {
+ case AArch64::TBXv8i8One:
+ case AArch64::TBXv8i8Two:
+ case AArch64::TBXv8i8Three:
+ case AArch64::TBXv8i8Four:
+ IsTbx = true;
+ Layout = ".8b";
+ return true;
+ case AArch64::TBLv8i8One:
+ case AArch64::TBLv8i8Two:
+ case AArch64::TBLv8i8Three:
+ case AArch64::TBLv8i8Four:
+ IsTbx = false;
+ Layout = ".8b";
+ return true;
+ case AArch64::TBXv16i8One:
+ case AArch64::TBXv16i8Two:
+ case AArch64::TBXv16i8Three:
+ case AArch64::TBXv16i8Four:
+ IsTbx = true;
+ Layout = ".16b";
+ return true;
+ case AArch64::TBLv16i8One:
+ case AArch64::TBLv16i8Two:
+ case AArch64::TBLv16i8Three:
+ case AArch64::TBLv16i8Four:
+ IsTbx = false;
+ Layout = ".16b";
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct LdStNInstrDesc {
+ unsigned Opcode;
+ const char *Mnemonic;
+ const char *Layout;
+ int ListOperand;
+ bool HasLane;
+ int NaturalOffset;
+};
+
+static const LdStNInstrDesc LdStNInstInfo[] = {
+ { AArch64::LD1i8, "ld1", ".b", 1, true, 0 },
+ { AArch64::LD1i16, "ld1", ".h", 1, true, 0 },
+ { AArch64::LD1i32, "ld1", ".s", 1, true, 0 },
+ { AArch64::LD1i64, "ld1", ".d", 1, true, 0 },
+ { AArch64::LD1i8_POST, "ld1", ".b", 2, true, 1 },
+ { AArch64::LD1i16_POST, "ld1", ".h", 2, true, 2 },
+ { AArch64::LD1i32_POST, "ld1", ".s", 2, true, 4 },
+ { AArch64::LD1i64_POST, "ld1", ".d", 2, true, 8 },
+ { AArch64::LD1Rv16b, "ld1r", ".16b", 0, false, 0 },
+ { AArch64::LD1Rv8h, "ld1r", ".8h", 0, false, 0 },
+ { AArch64::LD1Rv4s, "ld1r", ".4s", 0, false, 0 },
+ { AArch64::LD1Rv2d, "ld1r", ".2d", 0, false, 0 },
+ { AArch64::LD1Rv8b, "ld1r", ".8b", 0, false, 0 },
+ { AArch64::LD1Rv4h, "ld1r", ".4h", 0, false, 0 },
+ { AArch64::LD1Rv2s, "ld1r", ".2s", 0, false, 0 },
+ { AArch64::LD1Rv1d, "ld1r", ".1d", 0, false, 0 },
+ { AArch64::LD1Rv16b_POST, "ld1r", ".16b", 1, false, 1 },
+ { AArch64::LD1Rv8h_POST, "ld1r", ".8h", 1, false, 2 },
+ { AArch64::LD1Rv4s_POST, "ld1r", ".4s", 1, false, 4 },
+ { AArch64::LD1Rv2d_POST, "ld1r", ".2d", 1, false, 8 },
+ { AArch64::LD1Rv8b_POST, "ld1r", ".8b", 1, false, 1 },
+ { AArch64::LD1Rv4h_POST, "ld1r", ".4h", 1, false, 2 },
+ { AArch64::LD1Rv2s_POST, "ld1r", ".2s", 1, false, 4 },
+ { AArch64::LD1Rv1d_POST, "ld1r", ".1d", 1, false, 8 },
+ { AArch64::LD1Onev16b, "ld1", ".16b", 0, false, 0 },
+ { AArch64::LD1Onev8h, "ld1", ".8h", 0, false, 0 },
+ { AArch64::LD1Onev4s, "ld1", ".4s", 0, false, 0 },
+ { AArch64::LD1Onev2d, "ld1", ".2d", 0, false, 0 },
+ { AArch64::LD1Onev8b, "ld1", ".8b", 0, false, 0 },
+ { AArch64::LD1Onev4h, "ld1", ".4h", 0, false, 0 },
+ { AArch64::LD1Onev2s, "ld1", ".2s", 0, false, 0 },
+ { AArch64::LD1Onev1d, "ld1", ".1d", 0, false, 0 },
+ { AArch64::LD1Onev16b_POST, "ld1", ".16b", 1, false, 16 },
+ { AArch64::LD1Onev8h_POST, "ld1", ".8h", 1, false, 16 },
+ { AArch64::LD1Onev4s_POST, "ld1", ".4s", 1, false, 16 },
+ { AArch64::LD1Onev2d_POST, "ld1", ".2d", 1, false, 16 },
+ { AArch64::LD1Onev8b_POST, "ld1", ".8b", 1, false, 8 },
+ { AArch64::LD1Onev4h_POST, "ld1", ".4h", 1, false, 8 },
+ { AArch64::LD1Onev2s_POST, "ld1", ".2s", 1, false, 8 },
+ { AArch64::LD1Onev1d_POST, "ld1", ".1d", 1, false, 8 },
+ { AArch64::LD1Twov16b, "ld1", ".16b", 0, false, 0 },
+ { AArch64::LD1Twov8h, "ld1", ".8h", 0, false, 0 },
+ { AArch64::LD1Twov4s, "ld1", ".4s", 0, false, 0 },
+ { AArch64::LD1Twov2d, "ld1", ".2d", 0, false, 0 },
+ { AArch64::LD1Twov8b, "ld1", ".8b", 0, false, 0 },
+ { AArch64::LD1Twov4h, "ld1", ".4h", 0, false, 0 },
+ { AArch64::LD1Twov2s, "ld1", ".2s", 0, false, 0 },
+ { AArch64::LD1Twov1d, "ld1", ".1d", 0, false, 0 },
+ { AArch64::LD1Twov16b_POST, "ld1", ".16b", 1, false, 32 },
+ { AArch64::LD1Twov8h_POST, "ld1", ".8h", 1, false, 32 },
+ { AArch64::LD1Twov4s_POST, "ld1", ".4s", 1, false, 32 },
+ { AArch64::LD1Twov2d_POST, "ld1", ".2d", 1, false, 32 },
+ { AArch64::LD1Twov8b_POST, "ld1", ".8b", 1, false, 16 },
+ { AArch64::LD1Twov4h_POST, "ld1", ".4h", 1, false, 16 },
+ { AArch64::LD1Twov2s_POST, "ld1", ".2s", 1, false, 16 },
+ { AArch64::LD1Twov1d_POST, "ld1", ".1d", 1, false, 16 },
+ { AArch64::LD1Threev16b, "ld1", ".16b", 0, false, 0 },
+ { AArch64::LD1Threev8h, "ld1", ".8h", 0, false, 0 },
+ { AArch64::LD1Threev4s, "ld1", ".4s", 0, false, 0 },
+ { AArch64::LD1Threev2d, "ld1", ".2d", 0, false, 0 },
+ { AArch64::LD1Threev8b, "ld1", ".8b", 0, false, 0 },
+ { AArch64::LD1Threev4h, "ld1", ".4h", 0, false, 0 },
+ { AArch64::LD1Threev2s, "ld1", ".2s", 0, false, 0 },
+ { AArch64::LD1Threev1d, "ld1", ".1d", 0, false, 0 },
+ { AArch64::LD1Threev16b_POST, "ld1", ".16b", 1, false, 48 },
+ { AArch64::LD1Threev8h_POST, "ld1", ".8h", 1, false, 48 },
+ { AArch64::LD1Threev4s_POST, "ld1", ".4s", 1, false, 48 },
+ { AArch64::LD1Threev2d_POST, "ld1", ".2d", 1, false, 48 },
+ { AArch64::LD1Threev8b_POST, "ld1", ".8b", 1, false, 24 },
+ { AArch64::LD1Threev4h_POST, "ld1", ".4h", 1, false, 24 },
+ { AArch64::LD1Threev2s_POST, "ld1", ".2s", 1, false, 24 },
+ { AArch64::LD1Threev1d_POST, "ld1", ".1d", 1, false, 24 },
+ { AArch64::LD1Fourv16b, "ld1", ".16b", 0, false, 0 },
+ { AArch64::LD1Fourv8h, "ld1", ".8h", 0, false, 0 },
+ { AArch64::LD1Fourv4s, "ld1", ".4s", 0, false, 0 },
+ { AArch64::LD1Fourv2d, "ld1", ".2d", 0, false, 0 },
+ { AArch64::LD1Fourv8b, "ld1", ".8b", 0, false, 0 },
+ { AArch64::LD1Fourv4h, "ld1", ".4h", 0, false, 0 },
+ { AArch64::LD1Fourv2s, "ld1", ".2s", 0, false, 0 },
+ { AArch64::LD1Fourv1d, "ld1", ".1d", 0, false, 0 },
+ { AArch64::LD1Fourv16b_POST, "ld1", ".16b", 1, false, 64 },
+ { AArch64::LD1Fourv8h_POST, "ld1", ".8h", 1, false, 64 },
+ { AArch64::LD1Fourv4s_POST, "ld1", ".4s", 1, false, 64 },
+ { AArch64::LD1Fourv2d_POST, "ld1", ".2d", 1, false, 64 },
+ { AArch64::LD1Fourv8b_POST, "ld1", ".8b", 1, false, 32 },
+ { AArch64::LD1Fourv4h_POST, "ld1", ".4h", 1, false, 32 },
+ { AArch64::LD1Fourv2s_POST, "ld1", ".2s", 1, false, 32 },
+ { AArch64::LD1Fourv1d_POST, "ld1", ".1d", 1, false, 32 },
+ { AArch64::LD2i8, "ld2", ".b", 1, true, 0 },
+ { AArch64::LD2i16, "ld2", ".h", 1, true, 0 },
+ { AArch64::LD2i32, "ld2", ".s", 1, true, 0 },
+ { AArch64::LD2i64, "ld2", ".d", 1, true, 0 },
+ { AArch64::LD2i8_POST, "ld2", ".b", 2, true, 2 },
+ { AArch64::LD2i16_POST, "ld2", ".h", 2, true, 4 },
+ { AArch64::LD2i32_POST, "ld2", ".s", 2, true, 8 },
+ { AArch64::LD2i64_POST, "ld2", ".d", 2, true, 16 },
+ { AArch64::LD2Rv16b, "ld2r", ".16b", 0, false, 0 },
+ { AArch64::LD2Rv8h, "ld2r", ".8h", 0, false, 0 },
+ { AArch64::LD2Rv4s, "ld2r", ".4s", 0, false, 0 },
+ { AArch64::LD2Rv2d, "ld2r", ".2d", 0, false, 0 },
+ { AArch64::LD2Rv8b, "ld2r", ".8b", 0, false, 0 },
+ { AArch64::LD2Rv4h, "ld2r", ".4h", 0, false, 0 },
+ { AArch64::LD2Rv2s, "ld2r", ".2s", 0, false, 0 },
+ { AArch64::LD2Rv1d, "ld2r", ".1d", 0, false, 0 },
+ { AArch64::LD2Rv16b_POST, "ld2r", ".16b", 1, false, 2 },
+ { AArch64::LD2Rv8h_POST, "ld2r", ".8h", 1, false, 4 },
+ { AArch64::LD2Rv4s_POST, "ld2r", ".4s", 1, false, 8 },
+ { AArch64::LD2Rv2d_POST, "ld2r", ".2d", 1, false, 16 },
+ { AArch64::LD2Rv8b_POST, "ld2r", ".8b", 1, false, 2 },
+ { AArch64::LD2Rv4h_POST, "ld2r", ".4h", 1, false, 4 },
+ { AArch64::LD2Rv2s_POST, "ld2r", ".2s", 1, false, 8 },
+ { AArch64::LD2Rv1d_POST, "ld2r", ".1d", 1, false, 16 },
+ { AArch64::LD2Twov16b, "ld2", ".16b", 0, false, 0 },
+ { AArch64::LD2Twov8h, "ld2", ".8h", 0, false, 0 },
+ { AArch64::LD2Twov4s, "ld2", ".4s", 0, false, 0 },
+ { AArch64::LD2Twov2d, "ld2", ".2d", 0, false, 0 },
+ { AArch64::LD2Twov8b, "ld2", ".8b", 0, false, 0 },
+ { AArch64::LD2Twov4h, "ld2", ".4h", 0, false, 0 },
+ { AArch64::LD2Twov2s, "ld2", ".2s", 0, false, 0 },
+ { AArch64::LD2Twov16b_POST, "ld2", ".16b", 1, false, 32 },
+ { AArch64::LD2Twov8h_POST, "ld2", ".8h", 1, false, 32 },
+ { AArch64::LD2Twov4s_POST, "ld2", ".4s", 1, false, 32 },
+ { AArch64::LD2Twov2d_POST, "ld2", ".2d", 1, false, 32 },
+ { AArch64::LD2Twov8b_POST, "ld2", ".8b", 1, false, 16 },
+ { AArch64::LD2Twov4h_POST, "ld2", ".4h", 1, false, 16 },
+ { AArch64::LD2Twov2s_POST, "ld2", ".2s", 1, false, 16 },
+ { AArch64::LD3i8, "ld3", ".b", 1, true, 0 },
+ { AArch64::LD3i16, "ld3", ".h", 1, true, 0 },
+ { AArch64::LD3i32, "ld3", ".s", 1, true, 0 },
+ { AArch64::LD3i64, "ld3", ".d", 1, true, 0 },
+ { AArch64::LD3i8_POST, "ld3", ".b", 2, true, 3 },
+ { AArch64::LD3i16_POST, "ld3", ".h", 2, true, 6 },
+ { AArch64::LD3i32_POST, "ld3", ".s", 2, true, 12 },
+ { AArch64::LD3i64_POST, "ld3", ".d", 2, true, 24 },
+ { AArch64::LD3Rv16b, "ld3r", ".16b", 0, false, 0 },
+ { AArch64::LD3Rv8h, "ld3r", ".8h", 0, false, 0 },
+ { AArch64::LD3Rv4s, "ld3r", ".4s", 0, false, 0 },
+ { AArch64::LD3Rv2d, "ld3r", ".2d", 0, false, 0 },
+ { AArch64::LD3Rv8b, "ld3r", ".8b", 0, false, 0 },
+ { AArch64::LD3Rv4h, "ld3r", ".4h", 0, false, 0 },
+ { AArch64::LD3Rv2s, "ld3r", ".2s", 0, false, 0 },
+ { AArch64::LD3Rv1d, "ld3r", ".1d", 0, false, 0 },
+ { AArch64::LD3Rv16b_POST, "ld3r", ".16b", 1, false, 3 },
+ { AArch64::LD3Rv8h_POST, "ld3r", ".8h", 1, false, 6 },
+ { AArch64::LD3Rv4s_POST, "ld3r", ".4s", 1, false, 12 },
+ { AArch64::LD3Rv2d_POST, "ld3r", ".2d", 1, false, 24 },
+ { AArch64::LD3Rv8b_POST, "ld3r", ".8b", 1, false, 3 },
+ { AArch64::LD3Rv4h_POST, "ld3r", ".4h", 1, false, 6 },
+ { AArch64::LD3Rv2s_POST, "ld3r", ".2s", 1, false, 12 },
+ { AArch64::LD3Rv1d_POST, "ld3r", ".1d", 1, false, 24 },
+ { AArch64::LD3Threev16b, "ld3", ".16b", 0, false, 0 },
+ { AArch64::LD3Threev8h, "ld3", ".8h", 0, false, 0 },
+ { AArch64::LD3Threev4s, "ld3", ".4s", 0, false, 0 },
+ { AArch64::LD3Threev2d, "ld3", ".2d", 0, false, 0 },
+ { AArch64::LD3Threev8b, "ld3", ".8b", 0, false, 0 },
+ { AArch64::LD3Threev4h, "ld3", ".4h", 0, false, 0 },
+ { AArch64::LD3Threev2s, "ld3", ".2s", 0, false, 0 },
+ { AArch64::LD3Threev16b_POST, "ld3", ".16b", 1, false, 48 },
+ { AArch64::LD3Threev8h_POST, "ld3", ".8h", 1, false, 48 },
+ { AArch64::LD3Threev4s_POST, "ld3", ".4s", 1, false, 48 },
+ { AArch64::LD3Threev2d_POST, "ld3", ".2d", 1, false, 48 },
+ { AArch64::LD3Threev8b_POST, "ld3", ".8b", 1, false, 24 },
+ { AArch64::LD3Threev4h_POST, "ld3", ".4h", 1, false, 24 },
+ { AArch64::LD3Threev2s_POST, "ld3", ".2s", 1, false, 24 },
+ { AArch64::LD4i8, "ld4", ".b", 1, true, 0 },
+ { AArch64::LD4i16, "ld4", ".h", 1, true, 0 },
+ { AArch64::LD4i32, "ld4", ".s", 1, true, 0 },
+ { AArch64::LD4i64, "ld4", ".d", 1, true, 0 },
+ { AArch64::LD4i8_POST, "ld4", ".b", 2, true, 4 },
+ { AArch64::LD4i16_POST, "ld4", ".h", 2, true, 8 },
+ { AArch64::LD4i32_POST, "ld4", ".s", 2, true, 16 },
+ { AArch64::LD4i64_POST, "ld4", ".d", 2, true, 32 },
+ { AArch64::LD4Rv16b, "ld4r", ".16b", 0, false, 0 },
+ { AArch64::LD4Rv8h, "ld4r", ".8h", 0, false, 0 },
+ { AArch64::LD4Rv4s, "ld4r", ".4s", 0, false, 0 },
+ { AArch64::LD4Rv2d, "ld4r", ".2d", 0, false, 0 },
+ { AArch64::LD4Rv8b, "ld4r", ".8b", 0, false, 0 },
+ { AArch64::LD4Rv4h, "ld4r", ".4h", 0, false, 0 },
+ { AArch64::LD4Rv2s, "ld4r", ".2s", 0, false, 0 },
+ { AArch64::LD4Rv1d, "ld4r", ".1d", 0, false, 0 },
+ { AArch64::LD4Rv16b_POST, "ld4r", ".16b", 1, false, 4 },
+ { AArch64::LD4Rv8h_POST, "ld4r", ".8h", 1, false, 8 },
+ { AArch64::LD4Rv4s_POST, "ld4r", ".4s", 1, false, 16 },
+ { AArch64::LD4Rv2d_POST, "ld4r", ".2d", 1, false, 32 },
+ { AArch64::LD4Rv8b_POST, "ld4r", ".8b", 1, false, 4 },
+ { AArch64::LD4Rv4h_POST, "ld4r", ".4h", 1, false, 8 },
+ { AArch64::LD4Rv2s_POST, "ld4r", ".2s", 1, false, 16 },
+ { AArch64::LD4Rv1d_POST, "ld4r", ".1d", 1, false, 32 },
+ { AArch64::LD4Fourv16b, "ld4", ".16b", 0, false, 0 },
+ { AArch64::LD4Fourv8h, "ld4", ".8h", 0, false, 0 },
+ { AArch64::LD4Fourv4s, "ld4", ".4s", 0, false, 0 },
+ { AArch64::LD4Fourv2d, "ld4", ".2d", 0, false, 0 },
+ { AArch64::LD4Fourv8b, "ld4", ".8b", 0, false, 0 },
+ { AArch64::LD4Fourv4h, "ld4", ".4h", 0, false, 0 },
+ { AArch64::LD4Fourv2s, "ld4", ".2s", 0, false, 0 },
+ { AArch64::LD4Fourv16b_POST, "ld4", ".16b", 1, false, 64 },
+ { AArch64::LD4Fourv8h_POST, "ld4", ".8h", 1, false, 64 },
+ { AArch64::LD4Fourv4s_POST, "ld4", ".4s", 1, false, 64 },
+ { AArch64::LD4Fourv2d_POST, "ld4", ".2d", 1, false, 64 },
+ { AArch64::LD4Fourv8b_POST, "ld4", ".8b", 1, false, 32 },
+ { AArch64::LD4Fourv4h_POST, "ld4", ".4h", 1, false, 32 },
+ { AArch64::LD4Fourv2s_POST, "ld4", ".2s", 1, false, 32 },
+ { AArch64::ST1i8, "st1", ".b", 0, true, 0 },
+ { AArch64::ST1i16, "st1", ".h", 0, true, 0 },
+ { AArch64::ST1i32, "st1", ".s", 0, true, 0 },
+ { AArch64::ST1i64, "st1", ".d", 0, true, 0 },
+ { AArch64::ST1i8_POST, "st1", ".b", 1, true, 1 },
+ { AArch64::ST1i16_POST, "st1", ".h", 1, true, 2 },
+ { AArch64::ST1i32_POST, "st1", ".s", 1, true, 4 },
+ { AArch64::ST1i64_POST, "st1", ".d", 1, true, 8 },
+ { AArch64::ST1Onev16b, "st1", ".16b", 0, false, 0 },
+ { AArch64::ST1Onev8h, "st1", ".8h", 0, false, 0 },
+ { AArch64::ST1Onev4s, "st1", ".4s", 0, false, 0 },
+ { AArch64::ST1Onev2d, "st1", ".2d", 0, false, 0 },
+ { AArch64::ST1Onev8b, "st1", ".8b", 0, false, 0 },
+ { AArch64::ST1Onev4h, "st1", ".4h", 0, false, 0 },
+ { AArch64::ST1Onev2s, "st1", ".2s", 0, false, 0 },
+ { AArch64::ST1Onev1d, "st1", ".1d", 0, false, 0 },
+ { AArch64::ST1Onev16b_POST, "st1", ".16b", 1, false, 16 },
+ { AArch64::ST1Onev8h_POST, "st1", ".8h", 1, false, 16 },
+ { AArch64::ST1Onev4s_POST, "st1", ".4s", 1, false, 16 },
+ { AArch64::ST1Onev2d_POST, "st1", ".2d", 1, false, 16 },
+ { AArch64::ST1Onev8b_POST, "st1", ".8b", 1, false, 8 },
+ { AArch64::ST1Onev4h_POST, "st1", ".4h", 1, false, 8 },
+ { AArch64::ST1Onev2s_POST, "st1", ".2s", 1, false, 8 },
+ { AArch64::ST1Onev1d_POST, "st1", ".1d", 1, false, 8 },
+ { AArch64::ST1Twov16b, "st1", ".16b", 0, false, 0 },
+ { AArch64::ST1Twov8h, "st1", ".8h", 0, false, 0 },
+ { AArch64::ST1Twov4s, "st1", ".4s", 0, false, 0 },
+ { AArch64::ST1Twov2d, "st1", ".2d", 0, false, 0 },
+ { AArch64::ST1Twov8b, "st1", ".8b", 0, false, 0 },
+ { AArch64::ST1Twov4h, "st1", ".4h", 0, false, 0 },
+ { AArch64::ST1Twov2s, "st1", ".2s", 0, false, 0 },
+ { AArch64::ST1Twov1d, "st1", ".1d", 0, false, 0 },
+ { AArch64::ST1Twov16b_POST, "st1", ".16b", 1, false, 32 },
+ { AArch64::ST1Twov8h_POST, "st1", ".8h", 1, false, 32 },
+ { AArch64::ST1Twov4s_POST, "st1", ".4s", 1, false, 32 },
+ { AArch64::ST1Twov2d_POST, "st1", ".2d", 1, false, 32 },
+ { AArch64::ST1Twov8b_POST, "st1", ".8b", 1, false, 16 },
+ { AArch64::ST1Twov4h_POST, "st1", ".4h", 1, false, 16 },
+ { AArch64::ST1Twov2s_POST, "st1", ".2s", 1, false, 16 },
+ { AArch64::ST1Twov1d_POST, "st1", ".1d", 1, false, 16 },
+ { AArch64::ST1Threev16b, "st1", ".16b", 0, false, 0 },
+ { AArch64::ST1Threev8h, "st1", ".8h", 0, false, 0 },
+ { AArch64::ST1Threev4s, "st1", ".4s", 0, false, 0 },
+ { AArch64::ST1Threev2d, "st1", ".2d", 0, false, 0 },
+ { AArch64::ST1Threev8b, "st1", ".8b", 0, false, 0 },
+ { AArch64::ST1Threev4h, "st1", ".4h", 0, false, 0 },
+ { AArch64::ST1Threev2s, "st1", ".2s", 0, false, 0 },
+ { AArch64::ST1Threev1d, "st1", ".1d", 0, false, 0 },
+ { AArch64::ST1Threev16b_POST, "st1", ".16b", 1, false, 48 },
+ { AArch64::ST1Threev8h_POST, "st1", ".8h", 1, false, 48 },
+ { AArch64::ST1Threev4s_POST, "st1", ".4s", 1, false, 48 },
+ { AArch64::ST1Threev2d_POST, "st1", ".2d", 1, false, 48 },
+ { AArch64::ST1Threev8b_POST, "st1", ".8b", 1, false, 24 },
+ { AArch64::ST1Threev4h_POST, "st1", ".4h", 1, false, 24 },
+ { AArch64::ST1Threev2s_POST, "st1", ".2s", 1, false, 24 },
+ { AArch64::ST1Threev1d_POST, "st1", ".1d", 1, false, 24 },
+ { AArch64::ST1Fourv16b, "st1", ".16b", 0, false, 0 },
+ { AArch64::ST1Fourv8h, "st1", ".8h", 0, false, 0 },
+ { AArch64::ST1Fourv4s, "st1", ".4s", 0, false, 0 },
+ { AArch64::ST1Fourv2d, "st1", ".2d", 0, false, 0 },
+ { AArch64::ST1Fourv8b, "st1", ".8b", 0, false, 0 },
+ { AArch64::ST1Fourv4h, "st1", ".4h", 0, false, 0 },
+ { AArch64::ST1Fourv2s, "st1", ".2s", 0, false, 0 },
+ { AArch64::ST1Fourv1d, "st1", ".1d", 0, false, 0 },
+ { AArch64::ST1Fourv16b_POST, "st1", ".16b", 1, false, 64 },
+ { AArch64::ST1Fourv8h_POST, "st1", ".8h", 1, false, 64 },
+ { AArch64::ST1Fourv4s_POST, "st1", ".4s", 1, false, 64 },
+ { AArch64::ST1Fourv2d_POST, "st1", ".2d", 1, false, 64 },
+ { AArch64::ST1Fourv8b_POST, "st1", ".8b", 1, false, 32 },
+ { AArch64::ST1Fourv4h_POST, "st1", ".4h", 1, false, 32 },
+ { AArch64::ST1Fourv2s_POST, "st1", ".2s", 1, false, 32 },
+ { AArch64::ST1Fourv1d_POST, "st1", ".1d", 1, false, 32 },
+ { AArch64::ST2i8, "st2", ".b", 0, true, 0 },
+ { AArch64::ST2i16, "st2", ".h", 0, true, 0 },
+ { AArch64::ST2i32, "st2", ".s", 0, true, 0 },
+ { AArch64::ST2i64, "st2", ".d", 0, true, 0 },
+ { AArch64::ST2i8_POST, "st2", ".b", 1, true, 2 },
+ { AArch64::ST2i16_POST, "st2", ".h", 1, true, 4 },
+ { AArch64::ST2i32_POST, "st2", ".s", 1, true, 8 },
+ { AArch64::ST2i64_POST, "st2", ".d", 1, true, 16 },
+ { AArch64::ST2Twov16b, "st2", ".16b", 0, false, 0 },
+ { AArch64::ST2Twov8h, "st2", ".8h", 0, false, 0 },
+ { AArch64::ST2Twov4s, "st2", ".4s", 0, false, 0 },
+ { AArch64::ST2Twov2d, "st2", ".2d", 0, false, 0 },
+ { AArch64::ST2Twov8b, "st2", ".8b", 0, false, 0 },
+ { AArch64::ST2Twov4h, "st2", ".4h", 0, false, 0 },
+ { AArch64::ST2Twov2s, "st2", ".2s", 0, false, 0 },
+ { AArch64::ST2Twov16b_POST, "st2", ".16b", 1, false, 32 },
+ { AArch64::ST2Twov8h_POST, "st2", ".8h", 1, false, 32 },
+ { AArch64::ST2Twov4s_POST, "st2", ".4s", 1, false, 32 },
+ { AArch64::ST2Twov2d_POST, "st2", ".2d", 1, false, 32 },
+ { AArch64::ST2Twov8b_POST, "st2", ".8b", 1, false, 16 },
+ { AArch64::ST2Twov4h_POST, "st2", ".4h", 1, false, 16 },
+ { AArch64::ST2Twov2s_POST, "st2", ".2s", 1, false, 16 },
+ { AArch64::ST3i8, "st3", ".b", 0, true, 0 },
+ { AArch64::ST3i16, "st3", ".h", 0, true, 0 },
+ { AArch64::ST3i32, "st3", ".s", 0, true, 0 },
+ { AArch64::ST3i64, "st3", ".d", 0, true, 0 },
+ { AArch64::ST3i8_POST, "st3", ".b", 1, true, 3 },
+ { AArch64::ST3i16_POST, "st3", ".h", 1, true, 6 },
+ { AArch64::ST3i32_POST, "st3", ".s", 1, true, 12 },
+ { AArch64::ST3i64_POST, "st3", ".d", 1, true, 24 },
+ { AArch64::ST3Threev16b, "st3", ".16b", 0, false, 0 },
+ { AArch64::ST3Threev8h, "st3", ".8h", 0, false, 0 },
+ { AArch64::ST3Threev4s, "st3", ".4s", 0, false, 0 },
+ { AArch64::ST3Threev2d, "st3", ".2d", 0, false, 0 },
+ { AArch64::ST3Threev8b, "st3", ".8b", 0, false, 0 },
+ { AArch64::ST3Threev4h, "st3", ".4h", 0, false, 0 },
+ { AArch64::ST3Threev2s, "st3", ".2s", 0, false, 0 },
+ { AArch64::ST3Threev16b_POST, "st3", ".16b", 1, false, 48 },
+ { AArch64::ST3Threev8h_POST, "st3", ".8h", 1, false, 48 },
+ { AArch64::ST3Threev4s_POST, "st3", ".4s", 1, false, 48 },
+ { AArch64::ST3Threev2d_POST, "st3", ".2d", 1, false, 48 },
+ { AArch64::ST3Threev8b_POST, "st3", ".8b", 1, false, 24 },
+ { AArch64::ST3Threev4h_POST, "st3", ".4h", 1, false, 24 },
+ { AArch64::ST3Threev2s_POST, "st3", ".2s", 1, false, 24 },
+ { AArch64::ST4i8, "st4", ".b", 0, true, 0 },
+ { AArch64::ST4i16, "st4", ".h", 0, true, 0 },
+ { AArch64::ST4i32, "st4", ".s", 0, true, 0 },
+ { AArch64::ST4i64, "st4", ".d", 0, true, 0 },
+ { AArch64::ST4i8_POST, "st4", ".b", 1, true, 4 },
+ { AArch64::ST4i16_POST, "st4", ".h", 1, true, 8 },
+ { AArch64::ST4i32_POST, "st4", ".s", 1, true, 16 },
+ { AArch64::ST4i64_POST, "st4", ".d", 1, true, 32 },
+ { AArch64::ST4Fourv16b, "st4", ".16b", 0, false, 0 },
+ { AArch64::ST4Fourv8h, "st4", ".8h", 0, false, 0 },
+ { AArch64::ST4Fourv4s, "st4", ".4s", 0, false, 0 },
+ { AArch64::ST4Fourv2d, "st4", ".2d", 0, false, 0 },
+ { AArch64::ST4Fourv8b, "st4", ".8b", 0, false, 0 },
+ { AArch64::ST4Fourv4h, "st4", ".4h", 0, false, 0 },
+ { AArch64::ST4Fourv2s, "st4", ".2s", 0, false, 0 },
+ { AArch64::ST4Fourv16b_POST, "st4", ".16b", 1, false, 64 },
+ { AArch64::ST4Fourv8h_POST, "st4", ".8h", 1, false, 64 },
+ { AArch64::ST4Fourv4s_POST, "st4", ".4s", 1, false, 64 },
+ { AArch64::ST4Fourv2d_POST, "st4", ".2d", 1, false, 64 },
+ { AArch64::ST4Fourv8b_POST, "st4", ".8b", 1, false, 32 },
+ { AArch64::ST4Fourv4h_POST, "st4", ".4h", 1, false, 32 },
+ { AArch64::ST4Fourv2s_POST, "st4", ".2s", 1, false, 32 },
+};
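+// NaturalOffset is the number of bytes the post-indexed form transfers:
+// registers in the list times register size. E.g. ST2Twov8h_POST moves two
+// 16-byte Q registers, so when the offset register is XZR the Apple-syntax
+// printer below emits "st2.8h { v0, v1 }, [x0], #32" (register numbers
+// illustrative).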
+
+static const LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) {
+ unsigned Idx;
+ for (Idx = 0; Idx != array_lengthof(LdStNInstInfo); ++Idx)
+ if (LdStNInstInfo[Idx].Opcode == Opcode)
+ return &LdStNInstInfo[Idx];
+
+ return nullptr;
+}
+
+void AArch64AppleInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
+ StringRef Annot,
+ const MCSubtargetInfo &STI) {
+ unsigned Opcode = MI->getOpcode();
+ StringRef Layout;
+
+ bool IsTbx;
+ if (isTblTbxInstruction(MI->getOpcode(), Layout, IsTbx)) {
+ O << "\t" << (IsTbx ? "tbx" : "tbl") << Layout << '\t'
+ << getRegisterName(MI->getOperand(0).getReg(), AArch64::vreg) << ", ";
+
+ unsigned ListOpNum = IsTbx ? 2 : 1;
+ printVectorList(MI, ListOpNum, STI, O, "");
+
+ O << ", "
+ << getRegisterName(MI->getOperand(ListOpNum + 1).getReg(), AArch64::vreg);
+ printAnnotation(O, Annot);
+ return;
+ }
+
+ if (const LdStNInstrDesc *LdStDesc = getLdStNInstrDesc(Opcode)) {
+ O << "\t" << LdStDesc->Mnemonic << LdStDesc->Layout << '\t';
+
+ // Now onto the operands: first a vector list with possible lane
+ // specifier. E.g. { v0 }[2]
+ int OpNum = LdStDesc->ListOperand;
+ printVectorList(MI, OpNum++, STI, O, "");
+
+ if (LdStDesc->HasLane)
+ O << '[' << MI->getOperand(OpNum++).getImm() << ']';
+
+ // Next the address: [xN]
+ unsigned AddrReg = MI->getOperand(OpNum++).getReg();
+ O << ", [" << getRegisterName(AddrReg) << ']';
+
+ // Finally, there might be a post-indexed offset.
+ if (LdStDesc->NaturalOffset != 0) {
+ unsigned Reg = MI->getOperand(OpNum++).getReg();
+ if (Reg != AArch64::XZR)
+ O << ", " << getRegisterName(Reg);
+ else {
+ assert(LdStDesc->NaturalOffset && "no offset on post-inc instruction?");
+ O << ", #" << LdStDesc->NaturalOffset;
+ }
+ }
+
+ printAnnotation(O, Annot);
+ return;
+ }
+
+ AArch64InstPrinter::printInst(MI, O, Annot, STI);
+}
+
+bool AArch64InstPrinter::printSysAlias(const MCInst *MI,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+#ifndef NDEBUG
+ unsigned Opcode = MI->getOpcode();
+ assert(Opcode == AArch64::SYSxt && "Invalid opcode for SYS alias!");
+#endif
+
+ const MCOperand &Op1 = MI->getOperand(0);
+ const MCOperand &Cn = MI->getOperand(1);
+ const MCOperand &Cm = MI->getOperand(2);
+ const MCOperand &Op2 = MI->getOperand(3);
+
+ unsigned Op1Val = Op1.getImm();
+ unsigned CnVal = Cn.getImm();
+ unsigned CmVal = Cm.getImm();
+ unsigned Op2Val = Op2.getImm();
+
+ uint16_t Encoding = Op2Val;
+ Encoding |= CmVal << 3;
+ Encoding |= CnVal << 7;
+ Encoding |= Op1Val << 11;
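+ // Worked example (operand values per the ARM ARM): "ic ialluis" is
+ // SYS #0, C7, C1, #0, which packs to 0 | (1 << 3) | (7 << 7) | (0 << 11)
+ // = 0x388.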
+
+ bool NeedsReg;
+ std::string Ins;
+ std::string Name;
+
+ if (CnVal == 7) {
+ switch (CmVal) {
+ default: return false;
+ // Maybe IC, maybe Prediction Restriction
+ case 1:
+ switch (Op1Val) {
+ default: return false;
+ case 0: goto Search_IC;
+ case 3: goto Search_PRCTX;
+ }
+ // Prediction Restriction aliases
+ case 3: {
+ Search_PRCTX:
+ const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByEncoding(Encoding >> 3);
+ if (!PRCTX || !PRCTX->haveFeatures(STI.getFeatureBits()))
+ return false;
+
+ NeedsReg = PRCTX->NeedsReg;
+ switch (Op2Val) {
+ default: return false;
+ case 4: Ins = "cfp\t"; break;
+ case 5: Ins = "dvp\t"; break;
+ case 7: Ins = "cpp\t"; break;
+ }
+ Name = std::string(PRCTX->Name);
+ }
+ break;
+ // IC aliases
+ case 5: {
+ Search_IC:
+ const AArch64IC::IC *IC = AArch64IC::lookupICByEncoding(Encoding);
+ if (!IC || !IC->haveFeatures(STI.getFeatureBits()))
+ return false;
+
+ NeedsReg = IC->NeedsReg;
+ Ins = "ic\t";
+ Name = std::string(IC->Name);
+ }
+ break;
+ // DC aliases
+ case 4: case 6: case 10: case 11: case 12: case 13: case 14:
+ {
+ const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
+ if (!DC || !DC->haveFeatures(STI.getFeatureBits()))
+ return false;
+
+ NeedsReg = true;
+ Ins = "dc\t";
+ Name = std::string(DC->Name);
+ }
+ break;
+ // AT aliases
+ case 8: case 9: {
+ const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
+ if (!AT || !AT->haveFeatures(STI.getFeatureBits()))
+ return false;
+
+ NeedsReg = true;
+ Ins = "at\t";
+ Name = std::string(AT->Name);
+ }
+ break;
+ }
+ } else if (CnVal == 8) {
+ // TLBI aliases
+ const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByEncoding(Encoding);
+ if (!TLBI || !TLBI->haveFeatures(STI.getFeatureBits()))
+ return false;
+
+ NeedsReg = TLBI->NeedsReg;
+ Ins = "tlbi\t";
+ Name = std::string(TLBI->Name);
+ } else
+ return false;
+
+ std::string Str = Ins + Name;
+ std::transform(Str.begin(), Str.end(), Str.begin(), ::tolower);
+
+ O << '\t' << Str;
+ if (NeedsReg)
+ O << ", " << getRegisterName(MI->getOperand(4).getReg());
+
+ return true;
+}
+
+void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ unsigned Reg = Op.getReg();
+ O << getRegisterName(Reg);
+ } else if (Op.isImm()) {
+ printImm(MI, OpNo, STI, O);
+ } else {
+ assert(Op.isExpr() && "unknown operand kind in printOperand");
+ Op.getExpr()->print(O, &MAI);
+ }
+}
+
+void AArch64InstPrinter::printImm(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ O << "#" << formatImm(Op.getImm());
+}
+
+void AArch64InstPrinter::printImmHex(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ O << format("#%#llx", Op.getImm());
+}
+
+void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
+ unsigned Imm, raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ unsigned Reg = Op.getReg();
+ if (Reg == AArch64::XZR)
+ O << "#" << Imm;
+ else
+ O << getRegisterName(Reg);
+ } else
+ llvm_unreachable("unknown operand kind in printPostIncOperand");
+}
+
+void AArch64InstPrinter::printVRegOperand(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ assert(Op.isReg() && "Non-register vreg operand!");
+ unsigned Reg = Op.getReg();
+ O << getRegisterName(Reg, AArch64::vreg);
+}
+
+void AArch64InstPrinter::printSysCROperand(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ assert(Op.isImm() && "System instruction C[nm] operands must be immediates!");
+ O << "c" << Op.getImm();
+}
+
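+// Example: "add x0, x1, #16, lsl #12" prints its immediate as
+// "#16, lsl #12" and, when a comment stream is attached, annotates it with
+// "=65536" (16 << 12).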
+void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNum);
+ if (MO.isImm()) {
+ unsigned Val = (MO.getImm() & 0xfff);
+ assert(Val == MO.getImm() && "Add/sub immediate out of range!");
+ unsigned Shift =
+ AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm());
+ O << '#' << formatImm(Val);
+ if (Shift != 0)
+ printShifter(MI, OpNum + 1, STI, O);
+
+ if (CommentStream)
+ *CommentStream << '=' << formatImm(Val << Shift) << '\n';
+ } else {
+ assert(MO.isExpr() && "Unexpected operand type!");
+ MO.getExpr()->print(O, &MAI);
+ printShifter(MI, OpNum + 1, STI, O);
+ }
+}
+
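+// Example (assuming the usual N:immr:imms packing of logical immediates):
+// the encoded value 0x7 (N=0, immr=0, imms=0b000111) decodes to 0xff for a
+// 32-bit operation, so this prints "#0xff".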
+template <typename T>
+void AArch64InstPrinter::printLogicalImm(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ uint64_t Val = MI->getOperand(OpNum).getImm();
+ O << "#0x";
+ O.write_hex(AArch64_AM::decodeLogicalImmediate(Val, 8 * sizeof(T)));
+}
+
+void AArch64InstPrinter::printShifter(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNum).getImm();
+ // LSL #0 should not be printed.
+ if (AArch64_AM::getShiftType(Val) == AArch64_AM::LSL &&
+ AArch64_AM::getShiftValue(Val) == 0)
+ return;
+ O << ", " << AArch64_AM::getShiftExtendName(AArch64_AM::getShiftType(Val))
+ << " #" << AArch64_AM::getShiftValue(Val);
+}
+
+void AArch64InstPrinter::printShiftedRegister(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ O << getRegisterName(MI->getOperand(OpNum).getReg());
+ printShifter(MI, OpNum + 1, STI, O);
+}
+
+void AArch64InstPrinter::printExtendedRegister(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ O << getRegisterName(MI->getOperand(OpNum).getReg());
+ printArithExtend(MI, OpNum + 1, STI, O);
+}
+
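+// Example: "add sp, sp, x0" carries UXTX with a zero shift amount, so no
+// extend is printed at all; with a shift amount of 2 the operand prints as
+// ", lsl #2" rather than ", uxtx #2".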
+void AArch64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNum).getImm();
+ AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getArithExtendType(Val);
+ unsigned ShiftVal = AArch64_AM::getArithShiftValue(Val);
+
+ // If the destination or first source register operand is [W]SP, print
+ // UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at
+ // all.
+ if (ExtType == AArch64_AM::UXTW || ExtType == AArch64_AM::UXTX) {
+ unsigned Dest = MI->getOperand(0).getReg();
+ unsigned Src1 = MI->getOperand(1).getReg();
+ if ( ((Dest == AArch64::SP || Src1 == AArch64::SP) &&
+ ExtType == AArch64_AM::UXTX) ||
+ ((Dest == AArch64::WSP || Src1 == AArch64::WSP) &&
+ ExtType == AArch64_AM::UXTW) ) {
+ if (ShiftVal != 0)
+ O << ", lsl #" << ShiftVal;
+ return;
+ }
+ }
+ O << ", " << AArch64_AM::getShiftExtendName(ExtType);
+ if (ShiftVal != 0)
+ O << " #" << ShiftVal;
+}
+
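+// Example: for a 64-bit load such as "ldr x0, [x1, w2, sxtw #3]" the Width
+// is 64, so the printed shift amount is Log2_32(64 / 8) = 3; an unsigned
+// 'x' extend prints as "lsl" rather than "uxtx".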
+static void printMemExtendImpl(bool SignExtend, bool DoShift,
+ unsigned Width, char SrcRegKind,
+ raw_ostream &O) {
+ // sxtw, sxtx, uxtw or lsl (== uxtx)
+ bool IsLSL = !SignExtend && SrcRegKind == 'x';
+ if (IsLSL)
+ O << "lsl";
+ else
+ O << (SignExtend ? 's' : 'u') << "xt" << SrcRegKind;
+
+ if (DoShift || IsLSL)
+ O << " #" << Log2_32(Width / 8);
+}
+
+void AArch64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O, char SrcRegKind,
+ unsigned Width) {
+ bool SignExtend = MI->getOperand(OpNum).getImm();
+ bool DoShift = MI->getOperand(OpNum + 1).getImm();
+ printMemExtendImpl(SignExtend, DoShift, Width, SrcRegKind, O);
+}
+
+template <bool SignExtend, int ExtWidth, char SrcRegKind, char Suffix>
+void AArch64InstPrinter::printRegWithShiftExtend(const MCInst *MI,
+ unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ printOperand(MI, OpNum, STI, O);
+ if (Suffix == 's' || Suffix == 'd')
+ O << '.' << Suffix;
+ else
+ assert(Suffix == 0 && "Unsupported suffix size");
+
+ bool DoShift = ExtWidth != 8;
+ if (SignExtend || DoShift || SrcRegKind == 'w') {
+ O << ", ";
+ printMemExtendImpl(SignExtend, DoShift, ExtWidth, SrcRegKind, O);
+ }
+}
+
+void AArch64InstPrinter::printCondCode(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(OpNum).getImm();
+ O << AArch64CC::getCondCodeName(CC);
+}
+
+void AArch64InstPrinter::printInverseCondCode(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(OpNum).getImm();
+ O << AArch64CC::getCondCodeName(AArch64CC::getInvertedCondCode(CC));
+}
+
+void AArch64InstPrinter::printAMNoIndex(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ O << '[' << getRegisterName(MI->getOperand(OpNum).getReg()) << ']';
+}
+
+template<int Scale>
+void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ O << '#' << formatImm(Scale * MI->getOperand(OpNum).getImm());
+}
+
+void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
+ unsigned Scale, raw_ostream &O) {
+ const MCOperand MO = MI->getOperand(OpNum);
+ if (MO.isImm()) {
+ O << "#" << formatImm(MO.getImm() * Scale);
+ } else {
+ assert(MO.isExpr() && "Unexpected operand type!");
+ MO.getExpr()->print(O, &MAI);
+ }
+}
+
+void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
+ unsigned Scale, raw_ostream &O) {
+ const MCOperand MO1 = MI->getOperand(OpNum + 1);
+ O << '[' << getRegisterName(MI->getOperand(OpNum).getReg());
+ if (MO1.isImm()) {
+ O << ", #" << formatImm(MO1.getImm() * Scale);
+ } else {
+ assert(MO1.isExpr() && "Unexpected operand type!");
+ O << ", ";
+ MO1.getExpr()->print(O, &MAI);
+ }
+ O << ']';
+}
+
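+// Example: prfop encoding 0 prints as "pldl1keep"; encodings with no named
+// alias fall back to the raw "#imm" form.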
+template <bool IsSVEPrefetch>
+void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned prfop = MI->getOperand(OpNum).getImm();
+ if (IsSVEPrefetch) {
+ if (auto PRFM = AArch64SVEPRFM::lookupSVEPRFMByEncoding(prfop)) {
+ O << PRFM->Name;
+ return;
+ }
+ } else if (auto PRFM = AArch64PRFM::lookupPRFMByEncoding(prfop)) {
+ O << PRFM->Name;
+ return;
+ }
+
+ O << '#' << formatImm(prfop);
+}
+
+void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned psbhintop = MI->getOperand(OpNum).getImm();
+ auto PSB = AArch64PSBHint::lookupPSBByEncoding(psbhintop);
+ if (PSB)
+ O << PSB->Name;
+ else
+ O << '#' << formatImm(psbhintop);
+}
+
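+// BTI targets live in the HINT immediate: #32 (bti), #34 (bti c),
+// #36 (bti j) and #38 (bti jc), so (Imm ^ 32) >> 1 maps them onto the 0-3
+// encodings used by the BTI hint table.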
+void AArch64InstPrinter::printBTIHintOp(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned btihintop = (MI->getOperand(OpNum).getImm() ^ 32) >> 1;
+ auto BTI = AArch64BTIHint::lookupBTIByEncoding(btihintop);
+ if (BTI)
+ O << BTI->Name;
+ else
+ O << '#' << formatImm(btihintop);
+}
+
+void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNum);
+ float FPImm =
+ MO.isFPImm() ? MO.getFPImm() : AArch64_AM::getFPImmFloat(MO.getImm());
+
+ // 8 decimal places are enough to perfectly represent permitted floats.
+ O << format("#%.8f", FPImm);
+}
+
+static unsigned getNextVectorRegister(unsigned Reg, unsigned Stride = 1) {
+ while (Stride--) {
+ switch (Reg) {
+ default:
+ llvm_unreachable("Vector register expected!");
+ case AArch64::Q0: Reg = AArch64::Q1; break;
+ case AArch64::Q1: Reg = AArch64::Q2; break;
+ case AArch64::Q2: Reg = AArch64::Q3; break;
+ case AArch64::Q3: Reg = AArch64::Q4; break;
+ case AArch64::Q4: Reg = AArch64::Q5; break;
+ case AArch64::Q5: Reg = AArch64::Q6; break;
+ case AArch64::Q6: Reg = AArch64::Q7; break;
+ case AArch64::Q7: Reg = AArch64::Q8; break;
+ case AArch64::Q8: Reg = AArch64::Q9; break;
+ case AArch64::Q9: Reg = AArch64::Q10; break;
+ case AArch64::Q10: Reg = AArch64::Q11; break;
+ case AArch64::Q11: Reg = AArch64::Q12; break;
+ case AArch64::Q12: Reg = AArch64::Q13; break;
+ case AArch64::Q13: Reg = AArch64::Q14; break;
+ case AArch64::Q14: Reg = AArch64::Q15; break;
+ case AArch64::Q15: Reg = AArch64::Q16; break;
+ case AArch64::Q16: Reg = AArch64::Q17; break;
+ case AArch64::Q17: Reg = AArch64::Q18; break;
+ case AArch64::Q18: Reg = AArch64::Q19; break;
+ case AArch64::Q19: Reg = AArch64::Q20; break;
+ case AArch64::Q20: Reg = AArch64::Q21; break;
+ case AArch64::Q21: Reg = AArch64::Q22; break;
+ case AArch64::Q22: Reg = AArch64::Q23; break;
+ case AArch64::Q23: Reg = AArch64::Q24; break;
+ case AArch64::Q24: Reg = AArch64::Q25; break;
+ case AArch64::Q25: Reg = AArch64::Q26; break;
+ case AArch64::Q26: Reg = AArch64::Q27; break;
+ case AArch64::Q27: Reg = AArch64::Q28; break;
+ case AArch64::Q28: Reg = AArch64::Q29; break;
+ case AArch64::Q29: Reg = AArch64::Q30; break;
+ case AArch64::Q30: Reg = AArch64::Q31; break;
+ // Vector lists can wrap around.
+ case AArch64::Q31:
+ Reg = AArch64::Q0;
+ break;
+ case AArch64::Z0: Reg = AArch64::Z1; break;
+ case AArch64::Z1: Reg = AArch64::Z2; break;
+ case AArch64::Z2: Reg = AArch64::Z3; break;
+ case AArch64::Z3: Reg = AArch64::Z4; break;
+ case AArch64::Z4: Reg = AArch64::Z5; break;
+ case AArch64::Z5: Reg = AArch64::Z6; break;
+ case AArch64::Z6: Reg = AArch64::Z7; break;
+ case AArch64::Z7: Reg = AArch64::Z8; break;
+ case AArch64::Z8: Reg = AArch64::Z9; break;
+ case AArch64::Z9: Reg = AArch64::Z10; break;
+ case AArch64::Z10: Reg = AArch64::Z11; break;
+ case AArch64::Z11: Reg = AArch64::Z12; break;
+ case AArch64::Z12: Reg = AArch64::Z13; break;
+ case AArch64::Z13: Reg = AArch64::Z14; break;
+ case AArch64::Z14: Reg = AArch64::Z15; break;
+ case AArch64::Z15: Reg = AArch64::Z16; break;
+ case AArch64::Z16: Reg = AArch64::Z17; break;
+ case AArch64::Z17: Reg = AArch64::Z18; break;
+ case AArch64::Z18: Reg = AArch64::Z19; break;
+ case AArch64::Z19: Reg = AArch64::Z20; break;
+ case AArch64::Z20: Reg = AArch64::Z21; break;
+ case AArch64::Z21: Reg = AArch64::Z22; break;
+ case AArch64::Z22: Reg = AArch64::Z23; break;
+ case AArch64::Z23: Reg = AArch64::Z24; break;
+ case AArch64::Z24: Reg = AArch64::Z25; break;
+ case AArch64::Z25: Reg = AArch64::Z26; break;
+ case AArch64::Z26: Reg = AArch64::Z27; break;
+ case AArch64::Z27: Reg = AArch64::Z28; break;
+ case AArch64::Z28: Reg = AArch64::Z29; break;
+ case AArch64::Z29: Reg = AArch64::Z30; break;
+ case AArch64::Z30: Reg = AArch64::Z31; break;
+ // Vector lists can wrap around.
+ case AArch64::Z31:
+ Reg = AArch64::Z0;
+ break;
+ }
+ }
+ return Reg;
+}
+
+template<unsigned size>
+void AArch64InstPrinter::printGPRSeqPairsClassOperand(const MCInst *MI,
+ unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ static_assert(size == 64 || size == 32,
+ "Template parameter must be either 32 or 64");
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+
+ unsigned Sube = (size == 32) ? AArch64::sube32 : AArch64::sube64;
+ unsigned Subo = (size == 32) ? AArch64::subo32 : AArch64::subo64;
+
+ unsigned Even = MRI.getSubReg(Reg, Sube);
+ unsigned Odd = MRI.getSubReg(Reg, Subo);
+ O << getRegisterName(Even) << ", " << getRegisterName(Odd);
+}
+
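+// Example: a QQ register pair starting at q0, printed with LayoutSuffix
+// ".4s", comes out as "{ v0.4s, v1.4s }".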
+void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O,
+ StringRef LayoutSuffix) {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+
+ O << "{ ";
+
+ // Work out how many registers there are in the list (if there is an actual
+ // list).
+ unsigned NumRegs = 1;
+ if (MRI.getRegClass(AArch64::DDRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::ZPR2RegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::QQRegClassID).contains(Reg))
+ NumRegs = 2;
+ else if (MRI.getRegClass(AArch64::DDDRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::ZPR3RegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::QQQRegClassID).contains(Reg))
+ NumRegs = 3;
+ else if (MRI.getRegClass(AArch64::DDDDRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::ZPR4RegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::QQQQRegClassID).contains(Reg))
+ NumRegs = 4;
+
+ // Now forget about the list and find out what the first register is.
+ if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::dsub0))
+ Reg = FirstReg;
+ else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::qsub0))
+ Reg = FirstReg;
+ else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::zsub0))
+ Reg = FirstReg;
+
+ // If it's a D-reg, we need to promote it to the equivalent Q-reg before
+ // printing (otherwise getRegisterName fails).
+ if (MRI.getRegClass(AArch64::FPR64RegClassID).contains(Reg)) {
+ const MCRegisterClass &FPR128RC =
+ MRI.getRegClass(AArch64::FPR128RegClassID);
+ Reg = MRI.getMatchingSuperReg(Reg, AArch64::dsub, &FPR128RC);
+ }
+
+ for (unsigned i = 0; i < NumRegs; ++i, Reg = getNextVectorRegister(Reg)) {
+ if (MRI.getRegClass(AArch64::ZPRRegClassID).contains(Reg))
+ O << getRegisterName(Reg) << LayoutSuffix;
+ else
+ O << getRegisterName(Reg, AArch64::vreg) << LayoutSuffix;
+
+ if (i + 1 != NumRegs)
+ O << ", ";
+ }
+
+ O << " }";
+}
+
+void
+AArch64InstPrinter::printImplicitlyTypedVectorList(const MCInst *MI,
+ unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ printVectorList(MI, OpNum, STI, O, "");
+}
+
+template <unsigned NumLanes, char LaneKind>
+void AArch64InstPrinter::printTypedVectorList(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ std::string Suffix(".");
+ if (NumLanes)
+ Suffix += itostr(NumLanes) + LaneKind;
+ else
+ Suffix += LaneKind;
+
+ printVectorList(MI, OpNum, STI, O, Suffix);
+}
+
+void AArch64InstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ O << "[" << MI->getOperand(OpNum).getImm() << "]";
+}
+
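+// Resolved branch offsets are in units of 4-byte instructions, e.g. an
+// immediate of 3 prints as "#12".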
+void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNum);
+
+ // If the label has already been resolved to an immediate offset (say, when
+ // we're running the disassembler), just print the immediate.
+ if (Op.isImm()) {
+ O << "#" << formatImm(Op.getImm() * 4);
+ return;
+ }
+
+ // If the branch target is simply an address then print it in hex.
+ const MCConstantExpr *BranchTarget =
+ dyn_cast<MCConstantExpr>(MI->getOperand(OpNum).getExpr());
+ int64_t Address;
+ if (BranchTarget && BranchTarget->evaluateAsAbsolute(Address)) {
+ O << "0x";
+ O.write_hex(Address);
+ } else {
+ // Otherwise, just print the expression.
+ MI->getOperand(OpNum).getExpr()->print(O, &MAI);
+ }
+}
+
+void AArch64InstPrinter::printAdrpLabel(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNum);
+
+ // If the label has already been resolved to an immediate offset (say, when
+ // we're running the disassembler), just print the immediate.
+ if (Op.isImm()) {
+ O << "#" << formatImm(Op.getImm() * (1 << 12));
+ return;
+ }
+
+ // Otherwise, just print the expression.
+ MI->getOperand(OpNum).getExpr()->print(O, &MAI);
+}
+
+void AArch64InstPrinter::printBarrierOption(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNo).getImm();
+ unsigned Opcode = MI->getOpcode();
+
+ StringRef Name;
+ if (Opcode == AArch64::ISB) {
+ auto ISB = AArch64ISB::lookupISBByEncoding(Val);
+ Name = ISB ? ISB->Name : "";
+ } else if (Opcode == AArch64::TSB) {
+ auto TSB = AArch64TSB::lookupTSBByEncoding(Val);
+ Name = TSB ? TSB->Name : "";
+ } else {
+ auto DB = AArch64DB::lookupDBByEncoding(Val);
+ Name = DB ? DB->Name : "";
+ }
+ if (!Name.empty())
+ O << Name;
+ else
+ O << "#" << Val;
+}
+
+void AArch64InstPrinter::printMRSSystemRegister(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNo).getImm();
+
+ // Horrible hack for the one register that has identical encodings but
+ // different names in MSR and MRS. Because of this, one of MRS and MSR is
+ // going to get the wrong entry.
+ if (Val == AArch64SysReg::DBGDTRRX_EL0) {
+ O << "DBGDTRRX_EL0";
+ return;
+ }
+
+ const AArch64SysReg::SysReg *Reg = AArch64SysReg::lookupSysRegByEncoding(Val);
+ if (Reg && Reg->Readable && Reg->haveFeatures(STI.getFeatureBits()))
+ O << Reg->Name;
+ else
+ O << AArch64SysReg::genericRegisterString(Val);
+}
+
+void AArch64InstPrinter::printMSRSystemRegister(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNo).getImm();
+
+ // Horrible hack for the one register that has identical encodings but
+ // different names in MSR and MRS. Because of this, one of MRS and MSR is
+ // going to get the wrong entry.
+ if (Val == AArch64SysReg::DBGDTRTX_EL0) {
+ O << "DBGDTRTX_EL0";
+ return;
+ }
+
+ const AArch64SysReg::SysReg *Reg = AArch64SysReg::lookupSysRegByEncoding(Val);
+ if (Reg && Reg->Writeable && Reg->haveFeatures(STI.getFeatureBits()))
+ O << Reg->Name;
+ else
+ O << AArch64SysReg::genericRegisterString(Val);
+}
+
+void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNo).getImm();
+
+ auto PState = AArch64PState::lookupPStateByEncoding(Val);
+ if (PState && PState->haveFeatures(STI.getFeatureBits()))
+ O << PState->Name;
+ else
+ O << "#" << formatImm(Val);
+}
+
+void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned RawVal = MI->getOperand(OpNo).getImm();
+ uint64_t Val = AArch64_AM::decodeAdvSIMDModImmType10(RawVal);
+ O << format("#%#016llx", Val);
+}
+
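+// Example: fcmla rotations use printComplexRotationOp<90, 0>, mapping
+// immediates 0-3 to "#0", "#90", "#180" and "#270"; fcadd uses <180, 90>,
+// giving "#90" or "#270".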
+template<int64_t Angle, int64_t Remainder>
+void AArch64InstPrinter::printComplexRotationOp(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNo).getImm();
+ O << "#" << (Val * Angle) + Remainder;
+}
+
+void AArch64InstPrinter::printSVEPattern(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNum).getImm();
+ if (auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByEncoding(Val))
+ O << Pat->Name;
+ else
+ O << '#' << formatImm(Val);
+}
+
+template <char suffix>
+void AArch64InstPrinter::printSVERegOp(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ switch (suffix) {
+ case 0:
+ case 'b':
+ case 'h':
+ case 's':
+ case 'd':
+ case 'q':
+ break;
+ default: llvm_unreachable("Invalid kind specifier.");
+ }
+
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ O << getRegisterName(Reg);
+ if (suffix != 0)
+ O << '.' << suffix;
+}
+
+template <typename T>
+void AArch64InstPrinter::printImmSVE(T Value, raw_ostream &O) {
+ typename std::make_unsigned<T>::type HexValue = Value;
+
+ if (getPrintImmHex())
+ O << '#' << formatHex((uint64_t)HexValue);
+ else
+ O << '#' << formatDec(Value);
+
+ if (CommentStream) {
+ // Do the opposite to that used for instruction operands.
+ if (getPrintImmHex())
+ *CommentStream << '=' << formatDec(HexValue) << '\n';
+ else
+ *CommentStream << '=' << formatHex((uint64_t)Value) << '\n';
+ }
+}
+
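+// Example: the operand pair (1, LSL #8) prints as "#256" via printImmSVE;
+// only the corner case (0, LSL #8) keeps the explicit ", lsl #8" so the
+// encoding round-trips.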
+template <typename T>
+void AArch64InstPrinter::printImm8OptLsl(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned UnscaledVal = MI->getOperand(OpNum).getImm();
+ unsigned Shift = MI->getOperand(OpNum + 1).getImm();
+ assert(AArch64_AM::getShiftType(Shift) == AArch64_AM::LSL &&
+ "Unexpected shift type!");
+
+ // #0 lsl #8 is never pretty printed
+ if ((UnscaledVal == 0) && (AArch64_AM::getShiftValue(Shift) != 0)) {
+ O << '#' << formatImm(UnscaledVal);
+ printShifter(MI, OpNum + 1, STI, O);
+ return;
+ }
+
+ T Val;
+ if (std::is_signed<T>())
+ Val = (int8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Shift));
+ else
+ Val = (uint8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Shift));
+
+ printImmSVE(Val, O);
+}
+
+template <typename T>
+void AArch64InstPrinter::printSVELogicalImm(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ typedef typename std::make_signed<T>::type SignedT;
+ typedef typename std::make_unsigned<T>::type UnsignedT;
+
+ uint64_t Val = MI->getOperand(OpNum).getImm();
+ UnsignedT PrintVal = AArch64_AM::decodeLogicalImmediate(Val, 64);
+
+ // Prefer the default format for 16bit values, hex otherwise.
+ if ((int16_t)PrintVal == (SignedT)PrintVal)
+ printImmSVE((T)PrintVal, O);
+ else if ((uint16_t)PrintVal == PrintVal)
+ printImmSVE(PrintVal, O);
+ else
+ O << '#' << formatHex((uint64_t)PrintVal);
+}
+
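+// Example: with Width = 64, SVE register z3 prints as "d3". The arithmetic
+// below assumes B0/H0/S0/D0/Q0 each start a contiguous register range in
+// the generated register enum.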
+template <int Width>
+void AArch64InstPrinter::printZPRasFPR(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Base;
+ switch (Width) {
+ case 8: Base = AArch64::B0; break;
+ case 16: Base = AArch64::H0; break;
+ case 32: Base = AArch64::S0; break;
+ case 64: Base = AArch64::D0; break;
+ case 128: Base = AArch64::Q0; break;
+ default:
+ llvm_unreachable("Unsupported width");
+ }
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ O << getRegisterName(Reg - AArch64::Z0 + Base);
+}
+
+template <unsigned ImmIs0, unsigned ImmIs1>
+void AArch64InstPrinter::printExactFPImm(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ auto *Imm0Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmIs0);
+ auto *Imm1Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmIs1);
+ unsigned Val = MI->getOperand(OpNum).getImm();
+ O << "#" << (Val ? Imm1Desc->Repr : Imm0Desc->Repr);
+}
+
+void AArch64InstPrinter::printGPR64as32(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ O << getRegisterName(getWRegFromXReg(Reg));
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h
new file mode 100644
index 000000000000..5311f73ca21c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h
@@ -0,0 +1,222 @@
+//===-- AArch64InstPrinter.h - Convert AArch64 MCInst to assembly syntax --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints an AArch64 MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64INSTPRINTER_H
+#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64INSTPRINTER_H
+
+#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "../Utils/AArch64BaseInfo.h"
+
+namespace llvm {
+
+class AArch64InstPrinter : public MCInstPrinter {
+public:
+ AArch64InstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI);
+
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot,
+ const MCSubtargetInfo &STI) override;
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+
+ // Autogenerated by tblgen.
+ virtual void printInstruction(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ virtual bool printAliasInstr(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ virtual void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O);
+
+ virtual StringRef getRegName(unsigned RegNo) const {
+ return getRegisterName(RegNo);
+ }
+
+ static const char *getRegisterName(unsigned RegNo,
+ unsigned AltIdx = AArch64::NoRegAltName);
+
+protected:
+ bool printSysAlias(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ // Operand printers
+ void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ void printImm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ void printImmHex(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ template <typename T> void printImmSVE(T Value, raw_ostream &O);
+ void printPostIncOperand(const MCInst *MI, unsigned OpNo, unsigned Imm,
+ raw_ostream &O);
+ template <int Amount>
+ void printPostIncOperand(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O) {
+ printPostIncOperand(MI, OpNo, Amount, O);
+ }
+
+ void printVRegOperand(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printSysCROperand(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printAddSubImm(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ template <typename T>
+ void printLogicalImm(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printShifter(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printShiftedRegister(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printExtendedRegister(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printArithExtend(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
+ void printMemExtend(const MCInst *MI, unsigned OpNum, raw_ostream &O,
+ char SrcRegKind, unsigned Width);
+ template <char SrcRegKind, unsigned Width>
+ void printMemExtend(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O) {
+ printMemExtend(MI, OpNum, O, SrcRegKind, Width);
+ }
+ template <bool SignedExtend, int ExtWidth, char SrcRegKind, char Suffix>
+ void printRegWithShiftExtend(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printCondCode(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printInverseCondCode(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printAlignedLabel(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printUImm12Offset(const MCInst *MI, unsigned OpNum, unsigned Scale,
+ raw_ostream &O);
+ void printAMIndexedWB(const MCInst *MI, unsigned OpNum, unsigned Scale,
+ raw_ostream &O);
+
+ template <int Scale>
+ void printUImm12Offset(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O) {
+ printUImm12Offset(MI, OpNum, Scale, O);
+ }
+
+ template <int BitWidth>
+ void printAMIndexedWB(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O) {
+ printAMIndexedWB(MI, OpNum, BitWidth / 8, O);
+ }
+
+ void printAMNoIndex(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
+ template <int Scale>
+ void printImmScale(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
+ template <bool IsSVEPrefetch = false>
+ void printPrefetchOp(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
+ void printPSBHintOp(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
+ void printBTIHintOp(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
+ void printFPImmOperand(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
+ void printVectorList(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O,
+ StringRef LayoutSuffix);
+
+ /// Print a list of vector registers where the type suffix is implicit
+ /// (i.e. attached to the instruction rather than the registers).
+ void printImplicitlyTypedVectorList(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O);
+
+ template <unsigned NumLanes, char LaneKind>
+ void printTypedVectorList(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
+ void printVectorIndex(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printAdrpLabel(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printBarrierOption(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printMSRSystemRegister(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printMRSSystemRegister(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printSystemPStateField(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printSIMDType10Operand(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ template<int64_t Angle, int64_t Remainder>
+ void printComplexRotationOp(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ template<unsigned size>
+ void printGPRSeqPairsClassOperand(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ template <typename T>
+ void printImm8OptLsl(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ template <typename T>
+ void printSVELogicalImm(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printSVEPattern(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ template <char = 0>
+ void printSVERegOp(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printGPR64as32(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ template <int Width>
+ void printZPRasFPR(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ template <unsigned ImmIs0, unsigned ImmIs1>
+ void printExactFPImm(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+};
+
+class AArch64AppleInstPrinter : public AArch64InstPrinter {
+public:
+ AArch64AppleInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI);
+
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot,
+ const MCSubtargetInfo &STI) override;
+
+ void printInstruction(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O) override;
+ bool printAliasInstr(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O) override;
+ void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) override;
+
+ StringRef getRegName(unsigned RegNo) const override {
+ return getRegisterName(RegNo);
+ }
+
+ static const char *getRegisterName(unsigned RegNo,
+ unsigned AltIdx = AArch64::NoRegAltName);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64INSTPRINTER_H
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
new file mode 100644
index 000000000000..ecff1ab0a8b3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
@@ -0,0 +1,135 @@
+//===-- AArch64MCAsmInfo.cpp - AArch64 asm properties ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations of the AArch64MCAsmInfo properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64MCAsmInfo.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/CommandLine.h"
+using namespace llvm;
+
+enum AsmWriterVariantTy {
+ Default = -1,
+ Generic = 0,
+ Apple = 1
+};
+
+static cl::opt<AsmWriterVariantTy> AsmWriterVariant(
+ "aarch64-neon-syntax", cl::init(Default),
+ cl::desc("Choose style of NEON code to emit from AArch64 backend:"),
+ cl::values(clEnumValN(Generic, "generic", "Emit generic NEON assembly"),
+ clEnumValN(Apple, "apple", "Emit Apple-style NEON assembly")));
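+// For example, "llc -aarch64-neon-syntax=apple" forces the Apple-style
+// printer even for ELF triples.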
+
+AArch64MCAsmInfoDarwin::AArch64MCAsmInfoDarwin() {
+ // We prefer NEON instructions to be printed in the short, Apple-specific
+ // form when targeting Darwin.
+ AssemblerDialect = AsmWriterVariant == Default ? Apple : AsmWriterVariant;
+
+ PrivateGlobalPrefix = "L";
+ PrivateLabelPrefix = "L";
+ SeparatorString = "%%";
+ CommentString = ";";
+ CodePointerSize = CalleeSaveStackSlotSize = 8;
+
+ AlignmentIsInBytes = false;
+ UsesELFSectionDirectiveForBSS = true;
+ SupportsDebugInformation = true;
+ UseDataRegionDirectives = true;
+
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+}
+
+const MCExpr *AArch64MCAsmInfoDarwin::getExprForPersonalitySymbol(
+ const MCSymbol *Sym, unsigned Encoding, MCStreamer &Streamer) const {
+ // On Darwin, we can reference dwarf symbols with foo@GOT-., which
+ // is an indirect pc-relative reference. The default implementation
+ // won't reference using the GOT, so we need this target-specific
+ // version.
+ MCContext &Context = Streamer.getContext();
+ const MCExpr *Res =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOT, Context);
+ MCSymbol *PCSym = Context.createTempSymbol();
+ Streamer.EmitLabel(PCSym);
+ const MCExpr *PC = MCSymbolRefExpr::create(PCSym, Context);
+ return MCBinaryExpr::createSub(Res, PC, Context);
+}
+
+AArch64MCAsmInfoELF::AArch64MCAsmInfoELF(const Triple &T) {
+ if (T.getArch() == Triple::aarch64_be)
+ IsLittleEndian = false;
+
+ // We prefer NEON instructions to be printed in the generic form when
+ // targeting ELF.
+ AssemblerDialect = AsmWriterVariant == Default ? Generic : AsmWriterVariant;
+
+ CodePointerSize = 8;
+
+ // ".comm align is in bytes but .align is pow-2."
+ AlignmentIsInBytes = false;
+
+ CommentString = "//";
+ PrivateGlobalPrefix = ".L";
+ PrivateLabelPrefix = ".L";
+ Code32Directive = ".code\t32";
+
+ Data16bitsDirective = "\t.hword\t";
+ Data32bitsDirective = "\t.word\t";
+ Data64bitsDirective = "\t.xword\t";
+
+ UseDataRegionDirectives = false;
+
+ WeakRefDirective = "\t.weak\t";
+
+ SupportsDebugInformation = true;
+
+ // Exceptions handling
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+
+ UseIntegratedAssembler = true;
+
+ HasIdentDirective = true;
+}
+
+AArch64MCAsmInfoMicrosoftCOFF::AArch64MCAsmInfoMicrosoftCOFF() {
+ PrivateGlobalPrefix = ".L";
+ PrivateLabelPrefix = ".L";
+
+ Data16bitsDirective = "\t.hword\t";
+ Data32bitsDirective = "\t.word\t";
+ Data64bitsDirective = "\t.xword\t";
+
+ AlignmentIsInBytes = false;
+ SupportsDebugInformation = true;
+ CodePointerSize = 8;
+
+ CommentString = ";";
+ ExceptionsType = ExceptionHandling::WinEH;
+ WinEHEncodingType = WinEH::EncodingType::Itanium;
+}
+
+AArch64MCAsmInfoGNUCOFF::AArch64MCAsmInfoGNUCOFF() {
+ PrivateGlobalPrefix = ".L";
+ PrivateLabelPrefix = ".L";
+
+ Data16bitsDirective = "\t.hword\t";
+ Data32bitsDirective = "\t.word\t";
+ Data64bitsDirective = "\t.xword\t";
+
+ AlignmentIsInBytes = false;
+ SupportsDebugInformation = true;
+ CodePointerSize = 8;
+
+ CommentString = "//";
+ ExceptionsType = ExceptionHandling::WinEH;
+ WinEHEncodingType = WinEH::EncodingType::Itanium;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
new file mode 100644
index 000000000000..36ae92afc8c1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
@@ -0,0 +1,46 @@
+//=====-- AArch64MCAsmInfo.h - AArch64 asm properties ---------*- C++ -*--====//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the AArch64MCAsmInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCASMINFO_H
+#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCASMINFO_H
+
+#include "llvm/MC/MCAsmInfoCOFF.h"
+#include "llvm/MC/MCAsmInfoDarwin.h"
+#include "llvm/MC/MCAsmInfoELF.h"
+
+namespace llvm {
+class MCStreamer;
+class Target;
+class Triple;
+
+struct AArch64MCAsmInfoDarwin : public MCAsmInfoDarwin {
+ explicit AArch64MCAsmInfoDarwin();
+ const MCExpr *
+ getExprForPersonalitySymbol(const MCSymbol *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const override;
+};
+
+struct AArch64MCAsmInfoELF : public MCAsmInfoELF {
+ explicit AArch64MCAsmInfoELF(const Triple &T);
+};
+
+struct AArch64MCAsmInfoMicrosoftCOFF : public MCAsmInfoMicrosoft {
+ explicit AArch64MCAsmInfoMicrosoftCOFF();
+};
+
+struct AArch64MCAsmInfoGNUCOFF : public MCAsmInfoGNUCOFF {
+ explicit AArch64MCAsmInfoGNUCOFF();
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
new file mode 100644
index 000000000000..8cb7a1672983
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -0,0 +1,649 @@
+//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AArch64MCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCExpr.h"
+#include "Utils/AArch64BaseInfo.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "mccodeemitter"
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
+STATISTIC(MCNumFixups, "Number of MC fixups created.");
+
+namespace {
+
+class AArch64MCCodeEmitter : public MCCodeEmitter {
+ MCContext &Ctx;
+ const MCInstrInfo &MCII;
+
+public:
+ AArch64MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
+ : Ctx(ctx), MCII(mcii) {}
+ AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
+ void operator=(const AArch64MCCodeEmitter &) = delete;
+ ~AArch64MCCodeEmitter() override = default;
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMachineOpValue - Return binary encoding of operand. If the machine
+ /// operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
+ /// attached to a load, store or prfm instruction. If operand requires a
+ /// relocation, record it and return zero in that part of the encoding.
+ template <uint32_t FixupKind>
+ uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
+ /// target.
+ uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
+ /// the 2-bit shift field.
+ uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getCondBranchTargetOpValue - Return the encoded value for a conditional
+ /// branch target.
+ uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getLoadLiteralOpValue - Return the encoded value for a load-literal
+ /// pc-relative address.
+ uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
+ /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
+ /// operation is a sign extend (as opposed to a zero extend).
+ uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
+ /// branch target.
+ uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getBranchTargetOpValue - Return the encoded value for an unconditional
+ /// branch target.
+ uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
+ /// of a MOVZ or MOVK instruction.
+ uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getVecShifterOpValue - Return the encoded value for the vector shifter.
+ uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMoveVecShifterOpValue - Return the encoded value for the vector move
+ /// shifter (MSL).
+ uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getFixedPointScaleOpValue - Return the encoded value for the
+ /// FP-to-fixed-point scale factor.
+ uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
+
+ void encodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
+
+ unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
+
+ template<int hasRs, int hasRt2> unsigned
+ fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
+
+private:
+ FeatureBitset computeAvailableFeatures(const FeatureBitset &FB) const;
+ void
+ verifyInstructionPredicates(const MCInst &MI,
+ const FeatureBitset &AvailableFeatures) const;
+};
+
+} // end anonymous namespace
+
+/// getMachineOpValue - Return binary encoding of operand. If the machine
+/// operand requires relocation, record the relocation and return zero.
+unsigned
+AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ if (MO.isReg())
+ return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
+
+ assert(MO.isImm() && "did not expect relocated expression");
+ return static_cast<unsigned>(MO.getImm());
+}
+
+template<unsigned FixupKind> uint32_t
+AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ uint32_t ImmVal = 0;
+
+ if (MO.isImm())
+ ImmVal = static_cast<uint32_t>(MO.getImm());
+ else {
+ assert(MO.isExpr() && "unable to encode load/store imm operand");
+ MCFixupKind Kind = MCFixupKind(FixupKind);
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
+ ++MCNumFixups;
+ }
+
+ return ImmVal;
+}
+
+/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
+/// target.
+uint32_t
+AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected target type!");
+ const MCExpr *Expr = MO.getExpr();
+
+ MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
+ ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
+ : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
+ Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
+
+ MCNumFixups += 1;
+
+ // All of the information is in the fixup.
+ return 0;
+}
+
+/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
+/// the 2-bit shift field. The shift field is stored in bits 12-13 of the
+/// return value.
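+/// For example, "add x0, x1, #1, lsl #12" encodes as 0x1001: imm12 = 1 with
+/// bit 12 set to select LSL #12.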
+uint32_t
+AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Suboperands are [imm, shifter].
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
+ assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
+ "unexpected shift type for add/sub immediate");
+ unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
+ assert((ShiftVal == 0 || ShiftVal == 12) &&
+ "unexpected shift value for add/sub immediate");
+ if (MO.isImm())
+ return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
+ assert(MO.isExpr() && "Unable to encode MCOperand!");
+ const MCExpr *Expr = MO.getExpr();
+
+ // Encode the 12 bits of the fixup.
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
+ Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
+
+ ++MCNumFixups;
+
+ // Set the shift bit of the add instruction for relocation types
+ // R_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLD_ADD_DTPREL_HI12 and the
+ // COFF counterpart IMAGE_REL_ARM64_SECREL_HIGH12A (VK_SECREL_HI12).
+ if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
+ AArch64MCExpr::VariantKind RefKind = A64E->getKind();
+ if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
+ RefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
+ RefKind == AArch64MCExpr::VK_SECREL_HI12)
+ ShiftVal = 12;
+ }
+ return ShiftVal == 0 ? 0 : (1 << ShiftVal);
+}
+
+/// getCondBranchTargetOpValue - Return the encoded value for a conditional
+/// branch target.
+uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected target type!");
+
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
+
+ ++MCNumFixups;
+
+ // All of the information is in the fixup.
+ return 0;
+}
+
+/// getLoadLiteralOpValue - Return the encoded value for a load-literal
+/// pc-relative address.
+uint32_t
+AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected target type!");
+
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
+
+ ++MCNumFixups;
+
+ // All of the information is in the fixup.
+ return 0;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ unsigned SignExtend = MI.getOperand(OpIdx).getImm();
+ unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
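+  // Bit 1 of the result carries the sign-extend flag and bit 0 records
+  // whether the extend's shift is applied, so e.g. a sign-extended, shifted
+  // offset encodes as 0b11.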
+ return (SignExtend << 1) | DoShift;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected movz/movk immediate");
+
+ Fixups.push_back(MCFixup::create(
+ 0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));
+
+ ++MCNumFixups;
+
+ return 0;
+}
+
+/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
+/// branch target.
+uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+  assert(MO.isExpr() && "Unexpected test-branch target type!");
+
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
+
+ ++MCNumFixups;
+
+ // All of the information is in the fixup.
+ return 0;
+}
+
+/// getBranchTargetOpValue - Return the encoded value for an unconditional
+/// branch target.
+uint32_t
+AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+  assert(MO.isExpr() && "Unexpected branch target type!");
+
+ MCFixupKind Kind = MI.getOpcode() == AArch64::BL
+ ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
+ : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
+
+ ++MCNumFixups;
+
+ // All of the information is in the fixup.
+ return 0;
+}
+
+/// getVecShifterOpValue - Return the encoded value for the vector shifter:
+///
+/// 00 -> 0
+/// 01 -> 8
+/// 10 -> 16
+/// 11 -> 24
+uint32_t
+AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+
+ switch (MO.getImm()) {
+ default:
+ break;
+ case 0:
+ return 0;
+ case 8:
+ return 1;
+ case 16:
+ return 2;
+ case 24:
+ return 3;
+ }
+
+ llvm_unreachable("Invalid value for vector shift amount!");
+}
+
+/// getFixedPointScaleOpValue - Return the encoded value for the
+/// FP-to-fixed-point scale factor.
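+/// E.g. a conversion with #8 fractional bits is encoded as 64 - 8 = 56.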
+uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 64 - MO.getImm();
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 64 - MO.getImm();
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 32 - MO.getImm();
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 16 - MO.getImm();
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 8 - MO.getImm();
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return MO.getImm() - 64;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return MO.getImm() - 32;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return MO.getImm() - 16;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return MO.getImm() - 8;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Test shift
+ auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
+ assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
+ "Unexpected shift type for imm8_opt_lsl immediate.");
+
+ unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
+ assert((ShiftVal == 0 || ShiftVal == 8) &&
+ "Unexpected shift value for imm8_opt_lsl immediate.");
+
+ // Test immediate
+ auto Immediate = MI.getOperand(OpIdx).getImm();
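+  // Bit 8 of the result flags the optional "lsl #8": e.g. "#3, lsl #8"
+  // encodes as (3 & 0xff) | (1 << 8) = 0x103.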
+ return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
+}
+
+uint32_t
+AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value!");
+ // Normalize 1-16 range to 0-15.
+ return MO.getImm() - 1;
+}
+
+/// getMoveVecShifterOpValue - Return the encoded value for the vector move
+/// shifter (MSL).
+uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() &&
+ "Expected an immediate value for the move shift amount!");
+ unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
+ assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
+ return ShiftVal == 8 ? 0 : 1;
+}
+
+unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ // If one of the signed fixup kinds is applied to a MOVZ instruction, the
+ // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
+ // job to ensure that any bits possibly affected by this are 0. This means we
+ // must zero out bit 30 (essentially emitting a MOVN).
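+  // (MOVZ and MOVN differ only in bit 30, so with it cleared the later
+  // fixup/relocation is free to set the bit again if the resolved value
+  // calls for a MOVZ.)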
+ MCOperand UImm16MO = MI.getOperand(1);
+
+ // Nothing to do if there's no fixup.
+ if (UImm16MO.isImm())
+ return EncodedValue;
+
+ const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
+ switch (A64E->getKind()) {
+ case AArch64MCExpr::VK_DTPREL_G2:
+ case AArch64MCExpr::VK_DTPREL_G1:
+ case AArch64MCExpr::VK_DTPREL_G0:
+ case AArch64MCExpr::VK_GOTTPREL_G1:
+ case AArch64MCExpr::VK_TPREL_G2:
+ case AArch64MCExpr::VK_TPREL_G1:
+ case AArch64MCExpr::VK_TPREL_G0:
+ return EncodedValue & ~(1u << 30);
+ default:
+ // Nothing to do for an unsigned fixup.
+ return EncodedValue;
+ }
+}
+
+void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ verifyInstructionPredicates(MI,
+ computeAvailableFeatures(STI.getFeatureBits()));
+
+ if (MI.getOpcode() == AArch64::TLSDESCCALL) {
+ // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
+ // following (BLR) instruction. It doesn't emit any code itself so it
+ // doesn't go through the normal TableGenerated channels.
+ MCFixupKind Fixup = MCFixupKind(AArch64::fixup_aarch64_tlsdesc_call);
+ Fixups.push_back(MCFixup::create(0, MI.getOperand(0).getExpr(), Fixup));
+ return;
+ } else if (MI.getOpcode() == AArch64::CompilerBarrier) {
+    // This just prevents the compiler from reordering accesses; it emits no
+    // actual code.
+ return;
+ }
+
+ uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
+ support::endian::write<uint32_t>(OS, Binary, support::little);
+ ++MCNumEmitted; // Keep track of the # of mi's emitted.
+}
+
+unsigned
+AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
+ // (i.e. all bits 1) but is ignored by the processor.
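+  // Ra occupies bits 14:10, so OR in 0b11111 at that position.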
+ EncodedValue |= 0x1f << 10;
+ return EncodedValue;
+}
+
+template<int hasRs, int hasRt2> unsigned
+AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
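+  // Unused Rs (bits 20:16) and Rt2 (bits 14:10) fields are assembled as
+  // all-ones.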
+ if (!hasRs) EncodedValue |= 0x001F0000;
+ if (!hasRt2) EncodedValue |= 0x00007C00;
+
+ return EncodedValue;
+}
+
+unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
+ const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
+ // The Rm field of FCMP and friends is unused - it should be assembled
+ // as 0, but is ignored by the processor.
+ EncodedValue &= ~(0x1f << 16);
+ return EncodedValue;
+}
+
+#define ENABLE_INSTR_PREDICATE_VERIFIER
+#include "AArch64GenMCCodeEmitter.inc"
+
+MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx) {
+ return new AArch64MCCodeEmitter(MCII, Ctx);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
new file mode 100644
index 000000000000..0a529321edc8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
@@ -0,0 +1,147 @@
+//===-- AArch64MCExpr.cpp - AArch64 specific MC expression classes --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the assembly expression modifiers
+// accepted by the AArch64 architecture (e.g. ":lo12:", ":gottprel_g1:", ...).
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64MCExpr.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64symbolrefexpr"
+
+const AArch64MCExpr *AArch64MCExpr::create(const MCExpr *Expr, VariantKind Kind,
+ MCContext &Ctx) {
+ return new (Ctx) AArch64MCExpr(Expr, Kind);
+}
+
+StringRef AArch64MCExpr::getVariantKindName() const {
+ switch (static_cast<uint32_t>(getKind())) {
+ case VK_CALL: return "";
+ case VK_LO12: return ":lo12:";
+ case VK_ABS_G3: return ":abs_g3:";
+ case VK_ABS_G2: return ":abs_g2:";
+ case VK_ABS_G2_S: return ":abs_g2_s:";
+ case VK_ABS_G2_NC: return ":abs_g2_nc:";
+ case VK_ABS_G1: return ":abs_g1:";
+ case VK_ABS_G1_S: return ":abs_g1_s:";
+ case VK_ABS_G1_NC: return ":abs_g1_nc:";
+ case VK_ABS_G0: return ":abs_g0:";
+ case VK_ABS_G0_S: return ":abs_g0_s:";
+ case VK_ABS_G0_NC: return ":abs_g0_nc:";
+ case VK_DTPREL_G2: return ":dtprel_g2:";
+ case VK_DTPREL_G1: return ":dtprel_g1:";
+ case VK_DTPREL_G1_NC: return ":dtprel_g1_nc:";
+ case VK_DTPREL_G0: return ":dtprel_g0:";
+ case VK_DTPREL_G0_NC: return ":dtprel_g0_nc:";
+ case VK_DTPREL_HI12: return ":dtprel_hi12:";
+ case VK_DTPREL_LO12: return ":dtprel_lo12:";
+ case VK_DTPREL_LO12_NC: return ":dtprel_lo12_nc:";
+ case VK_TPREL_G2: return ":tprel_g2:";
+ case VK_TPREL_G1: return ":tprel_g1:";
+ case VK_TPREL_G1_NC: return ":tprel_g1_nc:";
+ case VK_TPREL_G0: return ":tprel_g0:";
+ case VK_TPREL_G0_NC: return ":tprel_g0_nc:";
+ case VK_TPREL_HI12: return ":tprel_hi12:";
+ case VK_TPREL_LO12: return ":tprel_lo12:";
+ case VK_TPREL_LO12_NC: return ":tprel_lo12_nc:";
+ case VK_TLSDESC_LO12: return ":tlsdesc_lo12:";
+ case VK_ABS_PAGE: return "";
+ case VK_ABS_PAGE_NC: return ":pg_hi21_nc:";
+ case VK_GOT: return ":got:";
+ case VK_GOT_PAGE: return ":got:";
+ case VK_GOT_LO12: return ":got_lo12:";
+ case VK_GOTTPREL: return ":gottprel:";
+ case VK_GOTTPREL_PAGE: return ":gottprel:";
+ case VK_GOTTPREL_LO12_NC: return ":gottprel_lo12:";
+ case VK_GOTTPREL_G1: return ":gottprel_g1:";
+ case VK_GOTTPREL_G0_NC: return ":gottprel_g0_nc:";
+ case VK_TLSDESC: return "";
+ case VK_TLSDESC_PAGE: return ":tlsdesc:";
+ case VK_SECREL_LO12: return ":secrel_lo12:";
+ case VK_SECREL_HI12: return ":secrel_hi12:";
+ default:
+ llvm_unreachable("Invalid ELF symbol kind");
+ }
+}
+
+void AArch64MCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
+ OS << getVariantKindName();
+ Expr->print(OS, MAI);
+}
+
+void AArch64MCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
+}
+
+MCFragment *AArch64MCExpr::findAssociatedFragment() const {
+ llvm_unreachable("FIXME: what goes here?");
+}
+
+bool AArch64MCExpr::evaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const {
+ if (!getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup))
+ return false;
+
+ Res =
+ MCValue::get(Res.getSymA(), Res.getSymB(), Res.getConstant(), getKind());
+
+ return true;
+}
+
+static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
+ switch (Expr->getKind()) {
+ case MCExpr::Target:
+ llvm_unreachable("Can't handle nested target expression");
+ break;
+ case MCExpr::Constant:
+ break;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
+ fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm);
+ fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm);
+ break;
+ }
+
+ case MCExpr::SymbolRef: {
+ // We're known to be under a TLS fixup, so any symbol should be
+ // modified. There should be only one.
+ const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr);
+ cast<MCSymbolELF>(SymRef.getSymbol()).setType(ELF::STT_TLS);
+ break;
+ }
+
+ case MCExpr::Unary:
+ fixELFSymbolsInTLSFixupsImpl(cast<MCUnaryExpr>(Expr)->getSubExpr(), Asm);
+ break;
+ }
+}
+
+void AArch64MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
+ switch (getSymbolLoc(Kind)) {
+ default:
+ return;
+ case VK_DTPREL:
+ case VK_GOTTPREL:
+ case VK_TPREL:
+ case VK_TLSDESC:
+ break;
+ }
+
+ fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
new file mode 100644
index 000000000000..ec9c95911628
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
@@ -0,0 +1,168 @@
+//=--- AArch64MCExpr.h - AArch64 specific MC expression classes ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes AArch64-specific MCExprs, used for modifiers like
+// ":lo12:" or ":gottprel_g1:".
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCEXPR_H
+#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCEXPR_H
+
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+class AArch64MCExpr : public MCTargetExpr {
+public:
+ enum VariantKind {
+ // Symbol locations specifying (roughly speaking) what calculation should be
+ // performed to construct the final address for the relocated
+ // symbol. E.g. direct, via the GOT, ...
+ VK_ABS = 0x001,
+ VK_SABS = 0x002,
+ VK_GOT = 0x003,
+ VK_DTPREL = 0x004,
+ VK_GOTTPREL = 0x005,
+ VK_TPREL = 0x006,
+ VK_TLSDESC = 0x007,
+ VK_SECREL = 0x008,
+ VK_SymLocBits = 0x00f,
+
+ // Variants specifying which part of the final address calculation is
+ // used. E.g. the low 12 bits for an ADD/LDR, the middle 16 bits for a
+ // MOVZ/MOVK.
+ VK_PAGE = 0x010,
+ VK_PAGEOFF = 0x020,
+ VK_HI12 = 0x030,
+ VK_G0 = 0x040,
+ VK_G1 = 0x050,
+ VK_G2 = 0x060,
+ VK_G3 = 0x070,
+ VK_AddressFragBits = 0x0f0,
+
+ // Whether the final relocation is a checked one (where a linker should
+ // perform a range-check on the final address) or not. Note that this field
+ // is unfortunately sometimes omitted from the assembly syntax. E.g. :lo12:
+ // on its own is a non-checked relocation. We side with ELF on being
+ // explicit about this!
+ VK_NC = 0x100,
+
+    // Convenience definitions for referring to specific textual representations
+    // of relocation specifiers. Note that the "_NC" suffix is sometimes
+    // omitted, in line with assembly syntax (VK_LO12 rather than VK_LO12_NC,
+    // since a user would write ":lo12:").
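+    // E.g. VK_TPREL_LO12_NC decomposes as
+    // VK_TPREL (0x006) | VK_PAGEOFF (0x020) | VK_NC (0x100) = 0x126.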
+ VK_CALL = VK_ABS,
+ VK_ABS_PAGE = VK_ABS | VK_PAGE,
+ VK_ABS_PAGE_NC = VK_ABS | VK_PAGE | VK_NC,
+ VK_ABS_G3 = VK_ABS | VK_G3,
+ VK_ABS_G2 = VK_ABS | VK_G2,
+ VK_ABS_G2_S = VK_SABS | VK_G2,
+ VK_ABS_G2_NC = VK_ABS | VK_G2 | VK_NC,
+ VK_ABS_G1 = VK_ABS | VK_G1,
+ VK_ABS_G1_S = VK_SABS | VK_G1,
+ VK_ABS_G1_NC = VK_ABS | VK_G1 | VK_NC,
+ VK_ABS_G0 = VK_ABS | VK_G0,
+ VK_ABS_G0_S = VK_SABS | VK_G0,
+ VK_ABS_G0_NC = VK_ABS | VK_G0 | VK_NC,
+ VK_LO12 = VK_ABS | VK_PAGEOFF | VK_NC,
+ VK_GOT_LO12 = VK_GOT | VK_PAGEOFF | VK_NC,
+ VK_GOT_PAGE = VK_GOT | VK_PAGE,
+ VK_DTPREL_G2 = VK_DTPREL | VK_G2,
+ VK_DTPREL_G1 = VK_DTPREL | VK_G1,
+ VK_DTPREL_G1_NC = VK_DTPREL | VK_G1 | VK_NC,
+ VK_DTPREL_G0 = VK_DTPREL | VK_G0,
+ VK_DTPREL_G0_NC = VK_DTPREL | VK_G0 | VK_NC,
+ VK_DTPREL_HI12 = VK_DTPREL | VK_HI12,
+ VK_DTPREL_LO12 = VK_DTPREL | VK_PAGEOFF,
+ VK_DTPREL_LO12_NC = VK_DTPREL | VK_PAGEOFF | VK_NC,
+ VK_GOTTPREL_PAGE = VK_GOTTPREL | VK_PAGE,
+ VK_GOTTPREL_LO12_NC = VK_GOTTPREL | VK_PAGEOFF | VK_NC,
+ VK_GOTTPREL_G1 = VK_GOTTPREL | VK_G1,
+ VK_GOTTPREL_G0_NC = VK_GOTTPREL | VK_G0 | VK_NC,
+ VK_TPREL_G2 = VK_TPREL | VK_G2,
+ VK_TPREL_G1 = VK_TPREL | VK_G1,
+ VK_TPREL_G1_NC = VK_TPREL | VK_G1 | VK_NC,
+ VK_TPREL_G0 = VK_TPREL | VK_G0,
+ VK_TPREL_G0_NC = VK_TPREL | VK_G0 | VK_NC,
+ VK_TPREL_HI12 = VK_TPREL | VK_HI12,
+ VK_TPREL_LO12 = VK_TPREL | VK_PAGEOFF,
+ VK_TPREL_LO12_NC = VK_TPREL | VK_PAGEOFF | VK_NC,
+ VK_TLSDESC_LO12 = VK_TLSDESC | VK_PAGEOFF,
+ VK_TLSDESC_PAGE = VK_TLSDESC | VK_PAGE,
+ VK_SECREL_LO12 = VK_SECREL | VK_PAGEOFF,
+ VK_SECREL_HI12 = VK_SECREL | VK_HI12,
+
+ VK_INVALID = 0xfff
+ };
+
+private:
+ const MCExpr *Expr;
+ const VariantKind Kind;
+
+ explicit AArch64MCExpr(const MCExpr *Expr, VariantKind Kind)
+ : Expr(Expr), Kind(Kind) {}
+
+public:
+ /// @name Construction
+ /// @{
+
+ static const AArch64MCExpr *create(const MCExpr *Expr, VariantKind Kind,
+ MCContext &Ctx);
+
+ /// @}
+ /// @name Accessors
+ /// @{
+
+ /// Get the kind of this expression.
+ VariantKind getKind() const { return Kind; }
+
+ /// Get the expression this modifier applies to.
+ const MCExpr *getSubExpr() const { return Expr; }
+
+ /// @}
+ /// @name VariantKind information extractors.
+ /// @{
+
+ static VariantKind getSymbolLoc(VariantKind Kind) {
+ return static_cast<VariantKind>(Kind & VK_SymLocBits);
+ }
+
+ static VariantKind getAddressFrag(VariantKind Kind) {
+ return static_cast<VariantKind>(Kind & VK_AddressFragBits);
+ }
+
+ static bool isNotChecked(VariantKind Kind) { return Kind & VK_NC; }
+
+ /// @}
+
+ /// Convert the variant kind into an ELF-appropriate modifier
+ /// (e.g. ":got:", ":lo12:").
+ StringRef getVariantKindName() const;
+
+ void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
+
+ void visitUsedExpr(MCStreamer &Streamer) const override;
+
+ MCFragment *findAssociatedFragment() const override;
+
+ bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const override;
+
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Target;
+ }
+
+ static bool classof(const AArch64MCExpr *) { return true; }
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
new file mode 100644
index 000000000000..df12274d9470
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -0,0 +1,412 @@
+//===-- AArch64MCTargetDesc.cpp - AArch64 Target Descriptions ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides AArch64 specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64MCTargetDesc.h"
+#include "AArch64ELFStreamer.h"
+#include "AArch64MCAsmInfo.h"
+#include "AArch64WinCOFFStreamer.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "MCTargetDesc/AArch64InstPrinter.h"
+#include "TargetInfo/AArch64TargetInfo.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCInstrAnalysis.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define GET_INSTRINFO_MC_DESC
+#define GET_INSTRINFO_MC_HELPERS
+#include "AArch64GenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "AArch64GenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "AArch64GenRegisterInfo.inc"
+
+static MCInstrInfo *createAArch64MCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitAArch64MCInstrInfo(X);
+ return X;
+}
+
+static MCSubtargetInfo *
+createAArch64MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
+ if (CPU.empty())
+ CPU = "generic";
+
+ return createAArch64MCSubtargetInfoImpl(TT, CPU, FS);
+}
+
+void AArch64_MC::initLLVMToCVRegMapping(MCRegisterInfo *MRI) {
+ // Mapping from CodeView to MC register id.
+ static const struct {
+ codeview::RegisterId CVReg;
+ MCPhysReg Reg;
+ } RegMap[] = {
+ {codeview::RegisterId::ARM64_W0, AArch64::W0},
+ {codeview::RegisterId::ARM64_W1, AArch64::W1},
+ {codeview::RegisterId::ARM64_W2, AArch64::W2},
+ {codeview::RegisterId::ARM64_W3, AArch64::W3},
+ {codeview::RegisterId::ARM64_W4, AArch64::W4},
+ {codeview::RegisterId::ARM64_W5, AArch64::W5},
+ {codeview::RegisterId::ARM64_W6, AArch64::W6},
+ {codeview::RegisterId::ARM64_W7, AArch64::W7},
+ {codeview::RegisterId::ARM64_W8, AArch64::W8},
+ {codeview::RegisterId::ARM64_W9, AArch64::W9},
+ {codeview::RegisterId::ARM64_W10, AArch64::W10},
+ {codeview::RegisterId::ARM64_W11, AArch64::W11},
+ {codeview::RegisterId::ARM64_W12, AArch64::W12},
+ {codeview::RegisterId::ARM64_W13, AArch64::W13},
+ {codeview::RegisterId::ARM64_W14, AArch64::W14},
+ {codeview::RegisterId::ARM64_W15, AArch64::W15},
+ {codeview::RegisterId::ARM64_W16, AArch64::W16},
+ {codeview::RegisterId::ARM64_W17, AArch64::W17},
+ {codeview::RegisterId::ARM64_W18, AArch64::W18},
+ {codeview::RegisterId::ARM64_W19, AArch64::W19},
+ {codeview::RegisterId::ARM64_W20, AArch64::W20},
+ {codeview::RegisterId::ARM64_W21, AArch64::W21},
+ {codeview::RegisterId::ARM64_W22, AArch64::W22},
+ {codeview::RegisterId::ARM64_W23, AArch64::W23},
+ {codeview::RegisterId::ARM64_W24, AArch64::W24},
+ {codeview::RegisterId::ARM64_W25, AArch64::W25},
+ {codeview::RegisterId::ARM64_W26, AArch64::W26},
+ {codeview::RegisterId::ARM64_W27, AArch64::W27},
+ {codeview::RegisterId::ARM64_W28, AArch64::W28},
+ {codeview::RegisterId::ARM64_W29, AArch64::W29},
+ {codeview::RegisterId::ARM64_W30, AArch64::W30},
+ {codeview::RegisterId::ARM64_WZR, AArch64::WZR},
+ {codeview::RegisterId::ARM64_X0, AArch64::X0},
+ {codeview::RegisterId::ARM64_X1, AArch64::X1},
+ {codeview::RegisterId::ARM64_X2, AArch64::X2},
+ {codeview::RegisterId::ARM64_X3, AArch64::X3},
+ {codeview::RegisterId::ARM64_X4, AArch64::X4},
+ {codeview::RegisterId::ARM64_X5, AArch64::X5},
+ {codeview::RegisterId::ARM64_X6, AArch64::X6},
+ {codeview::RegisterId::ARM64_X7, AArch64::X7},
+ {codeview::RegisterId::ARM64_X8, AArch64::X8},
+ {codeview::RegisterId::ARM64_X9, AArch64::X9},
+ {codeview::RegisterId::ARM64_X10, AArch64::X10},
+ {codeview::RegisterId::ARM64_X11, AArch64::X11},
+ {codeview::RegisterId::ARM64_X12, AArch64::X12},
+ {codeview::RegisterId::ARM64_X13, AArch64::X13},
+ {codeview::RegisterId::ARM64_X14, AArch64::X14},
+ {codeview::RegisterId::ARM64_X15, AArch64::X15},
+ {codeview::RegisterId::ARM64_X16, AArch64::X16},
+ {codeview::RegisterId::ARM64_X17, AArch64::X17},
+ {codeview::RegisterId::ARM64_X18, AArch64::X18},
+ {codeview::RegisterId::ARM64_X19, AArch64::X19},
+ {codeview::RegisterId::ARM64_X20, AArch64::X20},
+ {codeview::RegisterId::ARM64_X21, AArch64::X21},
+ {codeview::RegisterId::ARM64_X22, AArch64::X22},
+ {codeview::RegisterId::ARM64_X23, AArch64::X23},
+ {codeview::RegisterId::ARM64_X24, AArch64::X24},
+ {codeview::RegisterId::ARM64_X25, AArch64::X25},
+ {codeview::RegisterId::ARM64_X26, AArch64::X26},
+ {codeview::RegisterId::ARM64_X27, AArch64::X27},
+ {codeview::RegisterId::ARM64_X28, AArch64::X28},
+ {codeview::RegisterId::ARM64_FP, AArch64::FP},
+ {codeview::RegisterId::ARM64_LR, AArch64::LR},
+ {codeview::RegisterId::ARM64_SP, AArch64::SP},
+ {codeview::RegisterId::ARM64_ZR, AArch64::XZR},
+ {codeview::RegisterId::ARM64_NZCV, AArch64::NZCV},
+ {codeview::RegisterId::ARM64_S0, AArch64::S0},
+ {codeview::RegisterId::ARM64_S1, AArch64::S1},
+ {codeview::RegisterId::ARM64_S2, AArch64::S2},
+ {codeview::RegisterId::ARM64_S3, AArch64::S3},
+ {codeview::RegisterId::ARM64_S4, AArch64::S4},
+ {codeview::RegisterId::ARM64_S5, AArch64::S5},
+ {codeview::RegisterId::ARM64_S6, AArch64::S6},
+ {codeview::RegisterId::ARM64_S7, AArch64::S7},
+ {codeview::RegisterId::ARM64_S8, AArch64::S8},
+ {codeview::RegisterId::ARM64_S9, AArch64::S9},
+ {codeview::RegisterId::ARM64_S10, AArch64::S10},
+ {codeview::RegisterId::ARM64_S11, AArch64::S11},
+ {codeview::RegisterId::ARM64_S12, AArch64::S12},
+ {codeview::RegisterId::ARM64_S13, AArch64::S13},
+ {codeview::RegisterId::ARM64_S14, AArch64::S14},
+ {codeview::RegisterId::ARM64_S15, AArch64::S15},
+ {codeview::RegisterId::ARM64_S16, AArch64::S16},
+ {codeview::RegisterId::ARM64_S17, AArch64::S17},
+ {codeview::RegisterId::ARM64_S18, AArch64::S18},
+ {codeview::RegisterId::ARM64_S19, AArch64::S19},
+ {codeview::RegisterId::ARM64_S20, AArch64::S20},
+ {codeview::RegisterId::ARM64_S21, AArch64::S21},
+ {codeview::RegisterId::ARM64_S22, AArch64::S22},
+ {codeview::RegisterId::ARM64_S23, AArch64::S23},
+ {codeview::RegisterId::ARM64_S24, AArch64::S24},
+ {codeview::RegisterId::ARM64_S25, AArch64::S25},
+ {codeview::RegisterId::ARM64_S26, AArch64::S26},
+ {codeview::RegisterId::ARM64_S27, AArch64::S27},
+ {codeview::RegisterId::ARM64_S28, AArch64::S28},
+ {codeview::RegisterId::ARM64_S29, AArch64::S29},
+ {codeview::RegisterId::ARM64_S30, AArch64::S30},
+ {codeview::RegisterId::ARM64_S31, AArch64::S31},
+ {codeview::RegisterId::ARM64_D0, AArch64::D0},
+ {codeview::RegisterId::ARM64_D1, AArch64::D1},
+ {codeview::RegisterId::ARM64_D2, AArch64::D2},
+ {codeview::RegisterId::ARM64_D3, AArch64::D3},
+ {codeview::RegisterId::ARM64_D4, AArch64::D4},
+ {codeview::RegisterId::ARM64_D5, AArch64::D5},
+ {codeview::RegisterId::ARM64_D6, AArch64::D6},
+ {codeview::RegisterId::ARM64_D7, AArch64::D7},
+ {codeview::RegisterId::ARM64_D8, AArch64::D8},
+ {codeview::RegisterId::ARM64_D9, AArch64::D9},
+ {codeview::RegisterId::ARM64_D10, AArch64::D10},
+ {codeview::RegisterId::ARM64_D11, AArch64::D11},
+ {codeview::RegisterId::ARM64_D12, AArch64::D12},
+ {codeview::RegisterId::ARM64_D13, AArch64::D13},
+ {codeview::RegisterId::ARM64_D14, AArch64::D14},
+ {codeview::RegisterId::ARM64_D15, AArch64::D15},
+ {codeview::RegisterId::ARM64_D16, AArch64::D16},
+ {codeview::RegisterId::ARM64_D17, AArch64::D17},
+ {codeview::RegisterId::ARM64_D18, AArch64::D18},
+ {codeview::RegisterId::ARM64_D19, AArch64::D19},
+ {codeview::RegisterId::ARM64_D20, AArch64::D20},
+ {codeview::RegisterId::ARM64_D21, AArch64::D21},
+ {codeview::RegisterId::ARM64_D22, AArch64::D22},
+ {codeview::RegisterId::ARM64_D23, AArch64::D23},
+ {codeview::RegisterId::ARM64_D24, AArch64::D24},
+ {codeview::RegisterId::ARM64_D25, AArch64::D25},
+ {codeview::RegisterId::ARM64_D26, AArch64::D26},
+ {codeview::RegisterId::ARM64_D27, AArch64::D27},
+ {codeview::RegisterId::ARM64_D28, AArch64::D28},
+ {codeview::RegisterId::ARM64_D29, AArch64::D29},
+ {codeview::RegisterId::ARM64_D30, AArch64::D30},
+ {codeview::RegisterId::ARM64_D31, AArch64::D31},
+ {codeview::RegisterId::ARM64_Q0, AArch64::Q0},
+ {codeview::RegisterId::ARM64_Q1, AArch64::Q1},
+ {codeview::RegisterId::ARM64_Q2, AArch64::Q2},
+ {codeview::RegisterId::ARM64_Q3, AArch64::Q3},
+ {codeview::RegisterId::ARM64_Q4, AArch64::Q4},
+ {codeview::RegisterId::ARM64_Q5, AArch64::Q5},
+ {codeview::RegisterId::ARM64_Q6, AArch64::Q6},
+ {codeview::RegisterId::ARM64_Q7, AArch64::Q7},
+ {codeview::RegisterId::ARM64_Q8, AArch64::Q8},
+ {codeview::RegisterId::ARM64_Q9, AArch64::Q9},
+ {codeview::RegisterId::ARM64_Q10, AArch64::Q10},
+ {codeview::RegisterId::ARM64_Q11, AArch64::Q11},
+ {codeview::RegisterId::ARM64_Q12, AArch64::Q12},
+ {codeview::RegisterId::ARM64_Q13, AArch64::Q13},
+ {codeview::RegisterId::ARM64_Q14, AArch64::Q14},
+ {codeview::RegisterId::ARM64_Q15, AArch64::Q15},
+ {codeview::RegisterId::ARM64_Q16, AArch64::Q16},
+ {codeview::RegisterId::ARM64_Q17, AArch64::Q17},
+ {codeview::RegisterId::ARM64_Q18, AArch64::Q18},
+ {codeview::RegisterId::ARM64_Q19, AArch64::Q19},
+ {codeview::RegisterId::ARM64_Q20, AArch64::Q20},
+ {codeview::RegisterId::ARM64_Q21, AArch64::Q21},
+ {codeview::RegisterId::ARM64_Q22, AArch64::Q22},
+ {codeview::RegisterId::ARM64_Q23, AArch64::Q23},
+ {codeview::RegisterId::ARM64_Q24, AArch64::Q24},
+ {codeview::RegisterId::ARM64_Q25, AArch64::Q25},
+ {codeview::RegisterId::ARM64_Q26, AArch64::Q26},
+ {codeview::RegisterId::ARM64_Q27, AArch64::Q27},
+ {codeview::RegisterId::ARM64_Q28, AArch64::Q28},
+ {codeview::RegisterId::ARM64_Q29, AArch64::Q29},
+ {codeview::RegisterId::ARM64_Q30, AArch64::Q30},
+ {codeview::RegisterId::ARM64_Q31, AArch64::Q31},
+ };
+ for (unsigned I = 0; I < array_lengthof(RegMap); ++I)
+ MRI->mapLLVMRegToCVReg(RegMap[I].Reg, static_cast<int>(RegMap[I].CVReg));
+}
+
+static MCRegisterInfo *createAArch64MCRegisterInfo(const Triple &Triple) {
+ MCRegisterInfo *X = new MCRegisterInfo();
+ InitAArch64MCRegisterInfo(X, AArch64::LR);
+ AArch64_MC::initLLVMToCVRegMapping(X);
+ return X;
+}
+
+static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
+ const Triple &TheTriple) {
+ MCAsmInfo *MAI;
+ if (TheTriple.isOSBinFormatMachO())
+ MAI = new AArch64MCAsmInfoDarwin();
+ else if (TheTriple.isWindowsMSVCEnvironment())
+ MAI = new AArch64MCAsmInfoMicrosoftCOFF();
+ else if (TheTriple.isOSBinFormatCOFF())
+ MAI = new AArch64MCAsmInfoGNUCOFF();
+ else {
+ assert(TheTriple.isOSBinFormatELF() && "Invalid target");
+ MAI = new AArch64MCAsmInfoELF(TheTriple);
+ }
+
+ // Initial state of the frame pointer is SP.
+ unsigned Reg = MRI.getDwarfRegNum(AArch64::SP, true);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0);
+ MAI->addInitialFrameState(Inst);
+
+ return MAI;
+}
+
+static MCInstPrinter *createAArch64MCInstPrinter(const Triple &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI) {
+ if (SyntaxVariant == 0)
+ return new AArch64InstPrinter(MAI, MII, MRI);
+ if (SyntaxVariant == 1)
+ return new AArch64AppleInstPrinter(MAI, MII, MRI);
+
+ return nullptr;
+}
+
+static MCStreamer *createELFStreamer(const Triple &T, MCContext &Ctx,
+ std::unique_ptr<MCAsmBackend> &&TAB,
+ std::unique_ptr<MCObjectWriter> &&OW,
+ std::unique_ptr<MCCodeEmitter> &&Emitter,
+ bool RelaxAll) {
+ return createAArch64ELFStreamer(Ctx, std::move(TAB), std::move(OW),
+ std::move(Emitter), RelaxAll);
+}
+
+static MCStreamer *createMachOStreamer(MCContext &Ctx,
+ std::unique_ptr<MCAsmBackend> &&TAB,
+ std::unique_ptr<MCObjectWriter> &&OW,
+ std::unique_ptr<MCCodeEmitter> &&Emitter,
+ bool RelaxAll,
+ bool DWARFMustBeAtTheEnd) {
+ return createMachOStreamer(Ctx, std::move(TAB), std::move(OW),
+ std::move(Emitter), RelaxAll, DWARFMustBeAtTheEnd,
+ /*LabelSections*/ true);
+}
+
+static MCStreamer *
+createWinCOFFStreamer(MCContext &Ctx, std::unique_ptr<MCAsmBackend> &&TAB,
+ std::unique_ptr<MCObjectWriter> &&OW,
+ std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll,
+ bool IncrementalLinkerCompatible) {
+ return createAArch64WinCOFFStreamer(Ctx, std::move(TAB), std::move(OW),
+ std::move(Emitter), RelaxAll,
+ IncrementalLinkerCompatible);
+}
+
+namespace {
+
+class AArch64MCInstrAnalysis : public MCInstrAnalysis {
+public:
+ AArch64MCInstrAnalysis(const MCInstrInfo *Info) : MCInstrAnalysis(Info) {}
+
+ bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
+ uint64_t &Target) const override {
+ // Search for a PC-relative argument.
+ // This will handle instructions like bcc (where the first argument is the
+ // condition code) and cbz (where it is a register).
+ const auto &Desc = Info->get(Inst.getOpcode());
+ for (unsigned i = 0, e = Inst.getNumOperands(); i != e; i++) {
+ if (Desc.OpInfo[i].OperandType == MCOI::OPERAND_PCREL) {
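+        // PC-relative branch immediates are word offsets, so scale by the
+        // 4-byte instruction size to get a byte offset.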
+ int64_t Imm = Inst.getOperand(i).getImm() * 4;
+ Target = Addr + Imm;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ std::vector<std::pair<uint64_t, uint64_t>>
+ findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
+ uint64_t GotPltSectionVA,
+ const Triple &TargetTriple) const override {
+ // Do a lightweight parsing of PLT entries.
+ std::vector<std::pair<uint64_t, uint64_t>> Result;
+ for (uint64_t Byte = 0, End = PltContents.size(); Byte + 7 < End;
+ Byte += 4) {
+ uint32_t Insn = support::endian::read32le(PltContents.data() + Byte);
+ uint64_t Off = 0;
+ // Check for optional bti c that prefixes adrp in BTI enabled entries
+ if (Insn == 0xd503245f) {
+ Off = 4;
+ Insn = support::endian::read32le(PltContents.data() + Byte + Off);
+ }
+ // Check for adrp.
+ if ((Insn & 0x9f000000) != 0x90000000)
+ continue;
+ Off += 4;
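+      // Recover the ADRP destination page: the page containing the
+      // instruction plus the immlo (bits 30:29) and immhi (bits 23:5)
+      // fields scaled up to a 4 KiB page offset.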
+ uint64_t Imm = (((PltSectionVA + Byte) >> 12) << 12) +
+ (((Insn >> 29) & 3) << 12) + (((Insn >> 5) & 0x3ffff) << 14);
+ uint32_t Insn2 =
+ support::endian::read32le(PltContents.data() + Byte + Off);
+ // Check for: ldr Xt, [Xn, #pimm].
+ if (Insn2 >> 22 == 0x3e5) {
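+        // The 12-bit unsigned offset is scaled by the 8-byte access size.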
+ Imm += ((Insn2 >> 10) & 0xfff) << 3;
+ Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
+ Byte += 4;
+ }
+ }
+ return Result;
+ }
+};
+
+} // end anonymous namespace
+
+static MCInstrAnalysis *createAArch64InstrAnalysis(const MCInstrInfo *Info) {
+ return new AArch64MCInstrAnalysis(Info);
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeAArch64TargetMC() {
+ for (Target *T : {&getTheAArch64leTarget(), &getTheAArch64beTarget(),
+ &getTheAArch64_32Target(), &getTheARM64Target(),
+ &getTheARM64_32Target()}) {
+ // Register the MC asm info.
+ RegisterMCAsmInfoFn X(*T, createAArch64MCAsmInfo);
+
+ // Register the MC instruction info.
+ TargetRegistry::RegisterMCInstrInfo(*T, createAArch64MCInstrInfo);
+
+ // Register the MC register info.
+ TargetRegistry::RegisterMCRegInfo(*T, createAArch64MCRegisterInfo);
+
+ // Register the MC subtarget info.
+ TargetRegistry::RegisterMCSubtargetInfo(*T, createAArch64MCSubtargetInfo);
+
+ // Register the MC instruction analyzer.
+ TargetRegistry::RegisterMCInstrAnalysis(*T, createAArch64InstrAnalysis);
+
+ // Register the MC Code Emitter
+ TargetRegistry::RegisterMCCodeEmitter(*T, createAArch64MCCodeEmitter);
+
+ // Register the obj streamers.
+ TargetRegistry::RegisterELFStreamer(*T, createELFStreamer);
+ TargetRegistry::RegisterMachOStreamer(*T, createMachOStreamer);
+ TargetRegistry::RegisterCOFFStreamer(*T, createWinCOFFStreamer);
+
+ // Register the obj target streamer.
+ TargetRegistry::RegisterObjectTargetStreamer(
+ *T, createAArch64ObjectTargetStreamer);
+
+ // Register the asm streamer.
+ TargetRegistry::RegisterAsmTargetStreamer(*T,
+ createAArch64AsmTargetStreamer);
+ // Register the MCInstPrinter.
+ TargetRegistry::RegisterMCInstPrinter(*T, createAArch64MCInstPrinter);
+ }
+
+ // Register the asm backend.
+ for (Target *T : {&getTheAArch64leTarget(), &getTheAArch64_32Target(),
+ &getTheARM64Target(), &getTheARM64_32Target()})
+ TargetRegistry::RegisterMCAsmBackend(*T, createAArch64leAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(getTheAArch64beTarget(),
+ createAArch64beAsmBackend);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
new file mode 100644
index 000000000000..c84c313c1db0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
@@ -0,0 +1,89 @@
+//===-- AArch64MCTargetDesc.h - AArch64 Target Descriptions -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides AArch64 specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCTARGETDESC_H
+#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCTARGETDESC_H
+
+#include "llvm/Support/DataTypes.h"
+
+#include <memory>
+
+namespace llvm {
+class formatted_raw_ostream;
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCInstrInfo;
+class MCInstPrinter;
+class MCRegisterInfo;
+class MCObjectTargetWriter;
+class MCStreamer;
+class MCSubtargetInfo;
+class MCTargetOptions;
+class MCTargetStreamer;
+class StringRef;
+class Target;
+class Triple;
+class raw_ostream;
+class raw_pwrite_stream;
+
+MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx);
+MCAsmBackend *createAArch64leAsmBackend(const Target &T,
+ const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
+ const MCTargetOptions &Options);
+MCAsmBackend *createAArch64beAsmBackend(const Target &T,
+ const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
+ const MCTargetOptions &Options);
+
+std::unique_ptr<MCObjectTargetWriter>
+createAArch64ELFObjectWriter(uint8_t OSABI, bool IsILP32);
+
+std::unique_ptr<MCObjectTargetWriter>
+createAArch64MachObjectWriter(uint32_t CPUType, uint32_t CPUSubtype,
+ bool IsILP32);
+
+std::unique_ptr<MCObjectTargetWriter> createAArch64WinCOFFObjectWriter();
+
+MCTargetStreamer *createAArch64AsmTargetStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS,
+ MCInstPrinter *InstPrint,
+ bool isVerboseAsm);
+
+MCTargetStreamer *createAArch64ObjectTargetStreamer(MCStreamer &S,
+ const MCSubtargetInfo &STI);
+
+namespace AArch64_MC {
+void initLLVMToCVRegMapping(MCRegisterInfo *MRI);
+}
+
+} // End llvm namespace
+
+// Defines symbolic names for AArch64 registers. This defines a mapping from
+// register name to register number.
+//
+#define GET_REGINFO_ENUM
+#include "AArch64GenRegisterInfo.inc"
+
+// Defines symbolic names for the AArch64 instructions.
+//
+#define GET_INSTRINFO_ENUM
+#define GET_INSTRINFO_MC_HELPER_DECLS
+#include "AArch64GenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "AArch64GenSubtargetInfo.inc"
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
new file mode 100644
index 000000000000..b3ce5ef22eef
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
@@ -0,0 +1,411 @@
+//===-- AArch64MachObjectWriter.cpp - AArch64 Mach Object Writer ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFragment.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <cstdint>
+
+using namespace llvm;
+
+namespace {
+
+class AArch64MachObjectWriter : public MCMachObjectTargetWriter {
+ bool getAArch64FixupKindMachOInfo(const MCFixup &Fixup, unsigned &RelocType,
+ const MCSymbolRefExpr *Sym,
+ unsigned &Log2Size, const MCAssembler &Asm);
+
+public:
+ AArch64MachObjectWriter(uint32_t CPUType, uint32_t CPUSubtype, bool IsILP32)
+ : MCMachObjectTargetWriter(!IsILP32 /* is64Bit */, CPUType, CPUSubtype) {}
+
+ void recordRelocation(MachObjectWriter *Writer, MCAssembler &Asm,
+ const MCAsmLayout &Layout, const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) override;
+};
+
+} // end anonymous namespace
+
+bool AArch64MachObjectWriter::getAArch64FixupKindMachOInfo(
+ const MCFixup &Fixup, unsigned &RelocType, const MCSymbolRefExpr *Sym,
+ unsigned &Log2Size, const MCAssembler &Asm) {
+ RelocType = unsigned(MachO::ARM64_RELOC_UNSIGNED);
+ Log2Size = ~0U;
+
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ return false;
+
+ case FK_Data_1:
+ Log2Size = Log2_32(1);
+ return true;
+ case FK_Data_2:
+ Log2Size = Log2_32(2);
+ return true;
+ case FK_Data_4:
+ Log2Size = Log2_32(4);
+ if (Sym->getKind() == MCSymbolRefExpr::VK_GOT)
+ RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT);
+ return true;
+ case FK_Data_8:
+ Log2Size = Log2_32(8);
+ if (Sym->getKind() == MCSymbolRefExpr::VK_GOT)
+ RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT);
+ return true;
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ Log2Size = Log2_32(4);
+ switch (Sym->getKind()) {
+ default:
+ return false;
+ case MCSymbolRefExpr::VK_PAGEOFF:
+ RelocType = unsigned(MachO::ARM64_RELOC_PAGEOFF12);
+ return true;
+ case MCSymbolRefExpr::VK_GOTPAGEOFF:
+ RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12);
+ return true;
+ case MCSymbolRefExpr::VK_TLVPPAGEOFF:
+ RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12);
+ return true;
+ }
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ Log2Size = Log2_32(4);
+ // This encompasses the relocation for the whole 21-bit value.
+ switch (Sym->getKind()) {
+ default:
+ Asm.getContext().reportError(Fixup.getLoc(),
+ "ADR/ADRP relocations must be GOT relative");
+ return false;
+ case MCSymbolRefExpr::VK_PAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_PAGE21);
+ return true;
+ case MCSymbolRefExpr::VK_GOTPAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGE21);
+ return true;
+ case MCSymbolRefExpr::VK_TLVPPAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGE21);
+ return true;
+ }
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ Log2Size = Log2_32(4);
+ RelocType = unsigned(MachO::ARM64_RELOC_BRANCH26);
+ return true;
+ }
+}
+
+static bool canUseLocalRelocation(const MCSectionMachO &Section,
+ const MCSymbol &Symbol, unsigned Log2Size) {
+ // Debug info sections can use local relocations.
+ if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
+ return true;
+
+ // Otherwise, only pointer sized relocations are supported.
+ if (Log2Size != 3)
+ return false;
+
+ // But only if they don't point to a few forbidden sections.
+ if (!Symbol.isInSection())
+ return true;
+ const MCSectionMachO &RefSec = cast<MCSectionMachO>(Symbol.getSection());
+ if (RefSec.getType() == MachO::S_CSTRING_LITERALS)
+ return false;
+
+ if (RefSec.getSegmentName() == "__DATA" &&
+ RefSec.getSectionName() == "__objc_classrefs")
+ return false;
+
+ // FIXME: ld64 currently handles internal pointer-sized relocations
+ // incorrectly (applying the addend twice). We should be able to return true
+ // unconditionally by this point when that's fixed.
+ return false;
+}
+
+void AArch64MachObjectWriter::recordRelocation(
+ MachObjectWriter *Writer, MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
+
+ // See <reloc.h>.
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment);
+ unsigned Log2Size = 0;
+ int64_t Value = 0;
+ unsigned Index = 0;
+ unsigned Type = 0;
+ unsigned Kind = Fixup.getKind();
+ const MCSymbol *RelSymbol = nullptr;
+
+ FixupOffset += Fixup.getOffset();
+
+ // AArch64 pcrel relocation addends do not include the section offset.
+ if (IsPCRel)
+ FixedValue += FixupOffset;
+
+ // ADRP fixups use relocations for the whole symbol value and only
+ // put the addend in the instruction itself. Clear out any value the
+  // generic code figured out from the symbol definition.
+ if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
+ FixedValue = 0;
+
+ // imm19 relocations are for conditional branches, which require
+ // assembler local symbols. If we got here, that's not what we have,
+ // so complain loudly.
+ if (Kind == AArch64::fixup_aarch64_pcrel_branch19) {
+ Asm.getContext().reportError(Fixup.getLoc(),
+ "conditional branch requires assembler-local"
+ " label. '" +
+ Target.getSymA()->getSymbol().getName() +
+ "' is external.");
+ return;
+ }
+
+ // 14-bit branch relocations should only target internal labels, and so
+ // should never get here.
+ if (Kind == AArch64::fixup_aarch64_pcrel_branch14) {
+ Asm.getContext().reportError(Fixup.getLoc(),
+ "Invalid relocation on conditional branch!");
+ return;
+ }
+
+ if (!getAArch64FixupKindMachOInfo(Fixup, Type, Target.getSymA(), Log2Size,
+ Asm)) {
+ Asm.getContext().reportError(Fixup.getLoc(), "unknown AArch64 fixup kind!");
+ return;
+ }
+
+ Value = Target.getConstant();
+
+ if (Target.isAbsolute()) { // constant
+ // FIXME: Should this always be extern?
+ // SymbolNum of 0 indicates the absolute section.
+ Type = MachO::ARM64_RELOC_UNSIGNED;
+
+ if (IsPCRel) {
+ Asm.getContext().reportError(Fixup.getLoc(),
+ "PC relative absolute relocation!");
+ return;
+
+ // FIXME: x86_64 sets the type to a branch reloc here. Should we do
+ // something similar?
+ }
+ } else if (Target.getSymB()) { // A - B + constant
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ const MCSymbol *A_Base = Asm.getAtom(*A);
+
+ const MCSymbol *B = &Target.getSymB()->getSymbol();
+ const MCSymbol *B_Base = Asm.getAtom(*B);
+
+ // Check for "_foo@got - .", which comes through here as:
+ // Ltmp0:
+ // ... _foo@got - Ltmp0
+ if (Target.getSymA()->getKind() == MCSymbolRefExpr::VK_GOT &&
+ Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None &&
+ Layout.getSymbolOffset(*B) ==
+ Layout.getFragmentOffset(Fragment) + Fixup.getOffset()) {
+ // SymB is the PC, so use a PC-rel pointer-to-GOT relocation.
+ Type = MachO::ARM64_RELOC_POINTER_TO_GOT;
+ IsPCRel = 1;
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(A_Base, Fragment->getParent(), MRE);
+ return;
+ } else if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
+ Target.getSymB()->getKind() != MCSymbolRefExpr::VK_None) {
+ // Otherwise, neither symbol can be modified.
+ Asm.getContext().reportError(Fixup.getLoc(),
+ "unsupported relocation of modified symbol");
+ return;
+ }
+
+ // We don't support PCrel relocations of differences.
+ if (IsPCRel) {
+ Asm.getContext().reportError(Fixup.getLoc(),
+ "unsupported pc-relative relocation of "
+ "difference");
+ return;
+ }
+
+ // AArch64 always uses external relocations. If there is no symbol to use as
+ // a base address (a local symbol with no preceding non-local symbol),
+ // error out.
+ //
+ // FIXME: We should probably just synthesize an external symbol and use
+ // that.
+ if (!A_Base) {
+ Asm.getContext().reportError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + A->getName() +
+ "'. Must have non-local symbol earlier in section.");
+ return;
+ }
+ if (!B_Base) {
+ Asm.getContext().reportError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + B->getName() +
+ "'. Must have non-local symbol earlier in section.");
+ return;
+ }
+
+ if (A_Base == B_Base && A_Base) {
+ Asm.getContext().reportError(
+ Fixup.getLoc(), "unsupported relocation with identical base");
+ return;
+ }
+
+ Value += (!A->getFragment() ? 0 : Writer->getSymbolAddress(*A, Layout)) -
+ (!A_Base || !A_Base->getFragment() ? 0 : Writer->getSymbolAddress(
+ *A_Base, Layout));
+ Value -= (!B->getFragment() ? 0 : Writer->getSymbolAddress(*B, Layout)) -
+ (!B_Base || !B_Base->getFragment() ? 0 : Writer->getSymbolAddress(
+ *B_Base, Layout));
+
+ Type = MachO::ARM64_RELOC_UNSIGNED;
+
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(A_Base, Fragment->getParent(), MRE);
+
+ RelSymbol = B_Base;
+ Type = MachO::ARM64_RELOC_SUBTRACTOR;
+ } else { // A + constant
+ const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
+ const MCSectionMachO &Section =
+ static_cast<const MCSectionMachO &>(*Fragment->getParent());
+
+ bool CanUseLocalRelocation =
+ canUseLocalRelocation(Section, *Symbol, Log2Size);
+ if (Symbol->isTemporary() && (Value || !CanUseLocalRelocation)) {
+ // Make sure that the symbol is actually in a section here. If it isn't,
+ // emit an error and exit.
+ if (!Symbol->isInSection()) {
+ Asm.getContext().reportError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + Symbol->getName() +
+ "'. Must have non-local symbol earlier in section.");
+ return;
+ }
+ const MCSection &Sec = Symbol->getSection();
+ if (!Asm.getContext().getAsmInfo()->isSectionAtomizableBySymbols(Sec))
+ Symbol->setUsedInReloc();
+ }
+
+ const MCSymbol *Base = Asm.getAtom(*Symbol);
+ // If the symbol is a variable it can either be in a section and
+ // we have a base or it is absolute and should have been expanded.
+ assert(!Symbol->isVariable() || Base);
+
+ // Relocations inside debug sections always use local relocations when
+ // possible. This seems to be done because the debugger doesn't fully
+ // understand relocation entries and expects to find values that
+ // have already been fixed up.
+ if (Symbol->isInSection()) {
+ if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
+ Base = nullptr;
+ }
+
+ // AArch64 uses external relocations as much as possible. For debug
+ // sections, and for pointer-sized relocations (.quad), we allow section
+ // relocations. It's code sections that run into trouble.
+ if (Base) {
+ RelSymbol = Base;
+
+ // Add the local offset, if needed.
+ if (Base != Symbol)
+ Value +=
+ Layout.getSymbolOffset(*Symbol) - Layout.getSymbolOffset(*Base);
+ } else if (Symbol->isInSection()) {
+ if (!CanUseLocalRelocation) {
+ Asm.getContext().reportError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + Symbol->getName() +
+ "'. Must have non-local symbol earlier in section.");
+ return;
+ }
+ // Adjust the relocation to be section-relative.
+ // The index is the section ordinal (1-based).
+ const MCSection &Sec = Symbol->getSection();
+ Index = Sec.getOrdinal() + 1;
+ Value += Writer->getSymbolAddress(*Symbol, Layout);
+
+ if (IsPCRel)
+ Value -= Writer->getFragmentAddress(Fragment, Layout) +
+ Fixup.getOffset() + (1ULL << Log2Size);
+ } else {
+ llvm_unreachable(
+ "This constant variable should have been expanded during evaluation");
+ }
+ }
+
+ // If the relocation kind is Branch26, Page21, or Pageoff12, any addend
+ // is represented via an Addend relocation, not encoded directly into
+ // the instruction.
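+  // E.g. "bl _foo+20" produces an ARM64_RELOC_ADDEND entry carrying 20
+  // paired with the ARM64_RELOC_BRANCH26 against _foo.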
+ if ((Type == MachO::ARM64_RELOC_BRANCH26 ||
+ Type == MachO::ARM64_RELOC_PAGE21 ||
+ Type == MachO::ARM64_RELOC_PAGEOFF12) &&
+ Value) {
+    assert((Value & 0xff000000) == 0 && "Addend out of range for relocation!");
+
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 =
+ (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
+
+ // Now set up the Addend relocation.
+ Type = MachO::ARM64_RELOC_ADDEND;
+ Index = Value;
+ RelSymbol = nullptr;
+ IsPCRel = 0;
+ Log2Size = 2;
+
+ // Put zero into the instruction itself. The addend is in the relocation.
+ Value = 0;
+ }
+
+ // If there's any addend left to handle, encode it in the instruction.
+ FixedValue = Value;
+
+ // struct relocation_info (8 bytes)
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 =
+ (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
+}
+
+std::unique_ptr<MCObjectTargetWriter>
+llvm::createAArch64MachObjectWriter(uint32_t CPUType, uint32_t CPUSubtype,
+ bool IsILP32) {
+ return llvm::make_unique<AArch64MachObjectWriter>(CPUType, CPUSubtype,
+ IsILP32);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
new file mode 100644
index 000000000000..f70752f5303f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
@@ -0,0 +1,69 @@
+//===- AArch64TargetStreamer.cpp - AArch64TargetStreamer class ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AArch64TargetStreamer class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64TargetStreamer.h"
+#include "llvm/MC/ConstantPools.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+
+using namespace llvm;
+
+//
+// AArch64TargetStreamer Implementation
+//
+AArch64TargetStreamer::AArch64TargetStreamer(MCStreamer &S)
+ : MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}
+
+AArch64TargetStreamer::~AArch64TargetStreamer() = default;
+
+// The constant pool handling is shared by all AArch64TargetStreamer
+// implementations.
+const MCExpr *AArch64TargetStreamer::addConstantPoolEntry(const MCExpr *Expr,
+ unsigned Size,
+ SMLoc Loc) {
+ return ConstantPools->addEntry(Streamer, Expr, Size, Loc);
+}
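+
+// Illustrative sketch of the ldr= flow (the label .Ltmp0 is hypothetical):
+//     ldr x0, =0x1122334455667788
+// causes the asm parser to call addConstantPoolEntry(<constant>, /*Size=*/8,
+// Loc) and to rewrite the instruction as a literal load of the result:
+//     ldr x0, .Ltmp0
+// The constant itself is emitted at the next .ltorg, or when finish()
+// flushes any remaining pools at the end of assembly.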
+
+void AArch64TargetStreamer::emitCurrentConstantPool() {
+ ConstantPools->emitForCurrentSection(Streamer);
+}
+
+// finish() - write out any non-empty assembler constant pools.
+void AArch64TargetStreamer::finish() { ConstantPools->emitAll(Streamer); }
+
+void AArch64TargetStreamer::emitInst(uint32_t Inst) {
+ char Buffer[4];
+
+ // We can't just use EmitIntValue here, as that will swap the
+ // endianness on big-endian systems (instructions are always
+ // little-endian).
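+ // For example, emitInst(0xd503201f), the AArch64 NOP encoding, must always
+ // produce the byte sequence 1f 20 03 d5.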
+ for (unsigned I = 0; I < 4; ++I) {
+ Buffer[I] = uint8_t(Inst);
+ Inst >>= 8;
+ }
+
+ getStreamer().EmitBytes(StringRef(Buffer, 4));
+}
+
+namespace llvm {
+
+MCTargetStreamer *
+createAArch64ObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
+ const Triple &TT = STI.getTargetTriple();
+ if (TT.isOSBinFormatELF())
+ return new AArch64TargetELFStreamer(S);
+ if (TT.isOSBinFormatCOFF())
+ return new AArch64TargetWinCOFFStreamer(S);
+ return nullptr;
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
new file mode 100644
index 000000000000..3a0c5d8318dd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
@@ -0,0 +1,110 @@
+//===-- AArch64TargetStreamer.h - AArch64 Target Streamer ------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64TARGETSTREAMER_H
+#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64TARGETSTREAMER_H
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace {
+class AArch64ELFStreamer;
+}
+
+namespace llvm {
+
+class AArch64TargetStreamer : public MCTargetStreamer {
+public:
+ AArch64TargetStreamer(MCStreamer &S);
+ ~AArch64TargetStreamer() override;
+
+ void finish() override;
+
+ /// Callback used to implement the ldr= pseudo.
+ /// Add a new entry to the constant pool for the current section and return an
+ /// MCExpr that can be used to refer to the constant pool location.
+ const MCExpr *addConstantPoolEntry(const MCExpr *, unsigned Size, SMLoc Loc);
+
+ /// Callback used to implement the .ltorg directive.
+ /// Emit contents of constant pool for the current section.
+ void emitCurrentConstantPool();
+
+ /// Callback used to implement the .inst directive.
+ virtual void emitInst(uint32_t Inst);
+
+ virtual void EmitARM64WinCFIAllocStack(unsigned Size) {}
+ virtual void EmitARM64WinCFISaveFPLR(int Offset) {}
+ virtual void EmitARM64WinCFISaveFPLRX(int Offset) {}
+ virtual void EmitARM64WinCFISaveReg(unsigned Reg, int Offset) {}
+ virtual void EmitARM64WinCFISaveRegX(unsigned Reg, int Offset) {}
+ virtual void EmitARM64WinCFISaveRegP(unsigned Reg, int Offset) {}
+ virtual void EmitARM64WinCFISaveRegPX(unsigned Reg, int Offset) {}
+ virtual void EmitARM64WinCFISaveFReg(unsigned Reg, int Offset) {}
+ virtual void EmitARM64WinCFISaveFRegX(unsigned Reg, int Offset) {}
+ virtual void EmitARM64WinCFISaveFRegP(unsigned Reg, int Offset) {}
+ virtual void EmitARM64WinCFISaveFRegPX(unsigned Reg, int Offset) {}
+ virtual void EmitARM64WinCFISetFP() {}
+ virtual void EmitARM64WinCFIAddFP(unsigned Size) {}
+ virtual void EmitARM64WinCFINop() {}
+ virtual void EmitARM64WinCFIPrologEnd() {}
+ virtual void EmitARM64WinCFIEpilogStart() {}
+ virtual void EmitARM64WinCFIEpilogEnd() {}
+
+private:
+ std::unique_ptr<AssemblerConstantPools> ConstantPools;
+};
+
+class AArch64TargetELFStreamer : public AArch64TargetStreamer {
+private:
+ AArch64ELFStreamer &getStreamer();
+
+ void emitInst(uint32_t Inst) override;
+
+public:
+ AArch64TargetELFStreamer(MCStreamer &S) : AArch64TargetStreamer(S) {}
+};
+
+class AArch64TargetWinCOFFStreamer : public llvm::AArch64TargetStreamer {
+private:
+ // True if we are processing SEH directives in an epilogue.
+ bool InEpilogCFI = false;
+
+ // Symbol of the current epilog for which we are processing SEH directives.
+ MCSymbol *CurrentEpilog = nullptr;
+public:
+ AArch64TargetWinCOFFStreamer(llvm::MCStreamer &S)
+ : AArch64TargetStreamer(S) {}
+
+ // The unwind codes on ARM64 Windows are documented at
+ // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
+ void EmitARM64WinCFIAllocStack(unsigned Size) override;
+ void EmitARM64WinCFISaveFPLR(int Offset) override;
+ void EmitARM64WinCFISaveFPLRX(int Offset) override;
+ void EmitARM64WinCFISaveReg(unsigned Reg, int Offset) override;
+ void EmitARM64WinCFISaveRegX(unsigned Reg, int Offset) override;
+ void EmitARM64WinCFISaveRegP(unsigned Reg, int Offset) override;
+ void EmitARM64WinCFISaveRegPX(unsigned Reg, int Offset) override;
+ void EmitARM64WinCFISaveFReg(unsigned Reg, int Offset) override;
+ void EmitARM64WinCFISaveFRegX(unsigned Reg, int Offset) override;
+ void EmitARM64WinCFISaveFRegP(unsigned Reg, int Offset) override;
+ void EmitARM64WinCFISaveFRegPX(unsigned Reg, int Offset) override;
+ void EmitARM64WinCFISetFP() override;
+ void EmitARM64WinCFIAddFP(unsigned Size) override;
+ void EmitARM64WinCFINop() override;
+ void EmitARM64WinCFIPrologEnd() override;
+ void EmitARM64WinCFIEpilogStart() override;
+ void EmitARM64WinCFIEpilogEnd() override;
+private:
+ void EmitARM64WinUnwindCode(unsigned UnwindCode, int Reg, int Offset);
+};
+
+MCTargetStreamer *
+createAArch64ObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI);
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp
new file mode 100644
index 000000000000..a45880a07427
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp
@@ -0,0 +1,126 @@
+//===-- AArch64WinCOFFObjectWriter.cpp - AArch64 COFF Writer ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCExpr.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCWinCOFFObjectWriter.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+using namespace llvm;
+
+namespace {
+
+class AArch64WinCOFFObjectWriter : public MCWinCOFFObjectTargetWriter {
+public:
+ AArch64WinCOFFObjectWriter()
+ : MCWinCOFFObjectTargetWriter(COFF::IMAGE_FILE_MACHINE_ARM64) {}
+
+ ~AArch64WinCOFFObjectWriter() override = default;
+
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsCrossSection,
+ const MCAsmBackend &MAB) const override;
+
+ bool recordRelocation(const MCFixup &) const override;
+};
+
+} // end anonymous namespace
+
+unsigned AArch64WinCOFFObjectWriter::getRelocType(
+ MCContext &Ctx, const MCValue &Target, const MCFixup &Fixup,
+ bool IsCrossSection, const MCAsmBackend &MAB) const {
+ auto Modifier = Target.isAbsolute() ? MCSymbolRefExpr::VK_None
+ : Target.getSymA()->getKind();
+ const MCExpr *Expr = Fixup.getValue();
+
+ switch (static_cast<unsigned>(Fixup.getKind())) {
+ default: {
+ const MCFixupKindInfo &Info = MAB.getFixupKindInfo(Fixup.getKind());
+ report_fatal_error(Twine("unsupported relocation type: ") + Info.Name);
+ }
+
+ case FK_Data_4:
+ switch (Modifier) {
+ default:
+ return COFF::IMAGE_REL_ARM64_ADDR32;
+ case MCSymbolRefExpr::VK_COFF_IMGREL32:
+ return COFF::IMAGE_REL_ARM64_ADDR32NB;
+ case MCSymbolRefExpr::VK_SECREL:
+ return COFF::IMAGE_REL_ARM64_SECREL;
+ }
+
+ case FK_Data_8:
+ return COFF::IMAGE_REL_ARM64_ADDR64;
+
+ case FK_SecRel_2:
+ return COFF::IMAGE_REL_ARM64_SECTION;
+
+ case FK_SecRel_4:
+ return COFF::IMAGE_REL_ARM64_SECREL;
+
+ case AArch64::fixup_aarch64_add_imm12:
+ if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
+ AArch64MCExpr::VariantKind RefKind = A64E->getKind();
+ if (RefKind == AArch64MCExpr::VK_SECREL_LO12)
+ return COFF::IMAGE_REL_ARM64_SECREL_LOW12A;
+ if (RefKind == AArch64MCExpr::VK_SECREL_HI12)
+ return COFF::IMAGE_REL_ARM64_SECREL_HIGH12A;
+ }
+ return COFF::IMAGE_REL_ARM64_PAGEOFFSET_12A;
+
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
+ AArch64MCExpr::VariantKind RefKind = A64E->getKind();
+ if (RefKind == AArch64MCExpr::VK_SECREL_LO12)
+ return COFF::IMAGE_REL_ARM64_SECREL_LOW12L;
+ }
+ return COFF::IMAGE_REL_ARM64_PAGEOFFSET_12L;
+
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ return COFF::IMAGE_REL_ARM64_REL21;
+
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ return COFF::IMAGE_REL_ARM64_PAGEBASE_REL21;
+
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ return COFF::IMAGE_REL_ARM64_BRANCH14;
+
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ return COFF::IMAGE_REL_ARM64_BRANCH19;
+
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ return COFF::IMAGE_REL_ARM64_BRANCH26;
+ }
+}
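+
+// Illustrative mapping (not in the original source): the common ADRP/ADD
+// address materialization
+//     adrp x8, sym
+//     add  x8, x8, :lo12:sym
+// yields IMAGE_REL_ARM64_PAGEBASE_REL21 for the adrp fixup
+// (fixup_aarch64_pcrel_adrp_imm21) and IMAGE_REL_ARM64_PAGEOFFSET_12A for
+// the add fixup (fixup_aarch64_add_imm12), per the switch above.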
+
+bool AArch64WinCOFFObjectWriter::recordRelocation(const MCFixup &Fixup) const {
+ return true;
+}
+
+namespace llvm {
+
+std::unique_ptr<MCObjectTargetWriter> createAArch64WinCOFFObjectWriter() {
+ return llvm::make_unique<AArch64WinCOFFObjectWriter>();
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp
new file mode 100644
index 000000000000..37c6fbb03908
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp
@@ -0,0 +1,201 @@
+//===-- AArch64WinCOFFStreamer.cpp - AArch64 WinCOFF Streamer ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64WinCOFFStreamer.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCWin64EH.h"
+#include "llvm/MC/MCWinCOFFStreamer.h"
+
+using namespace llvm;
+
+namespace {
+
+class AArch64WinCOFFStreamer : public MCWinCOFFStreamer {
+ Win64EH::ARM64UnwindEmitter EHStreamer;
+
+public:
+ AArch64WinCOFFStreamer(MCContext &C, std::unique_ptr<MCAsmBackend> AB,
+ std::unique_ptr<MCCodeEmitter> CE,
+ std::unique_ptr<MCObjectWriter> OW)
+ : MCWinCOFFStreamer(C, std::move(AB), std::move(CE), std::move(OW)) {}
+
+ void EmitWinEHHandlerData(SMLoc Loc) override;
+ void EmitWindowsUnwindTables() override;
+ void FinishImpl() override;
+};
+
+void AArch64WinCOFFStreamer::EmitWinEHHandlerData(SMLoc Loc) {
+ MCStreamer::EmitWinEHHandlerData(Loc);
+
+ // We have to emit the unwind info now, because this directive
+ // actually switches to the .xdata section!
+ EHStreamer.EmitUnwindInfo(*this, getCurrentWinFrameInfo());
+}
+
+void AArch64WinCOFFStreamer::EmitWindowsUnwindTables() {
+ if (!getNumWinFrameInfos())
+ return;
+ EHStreamer.Emit(*this);
+}
+
+void AArch64WinCOFFStreamer::FinishImpl() {
+ EmitFrames(nullptr);
+ EmitWindowsUnwindTables();
+
+ MCWinCOFFStreamer::FinishImpl();
+}
+} // end anonymous namespace
+
+namespace llvm {
+
+// Helper function that factors out the unwind-code setup shared by codes
+// that can belong to both the prolog and the epilog.
+// There are three types of Windows ARM64 SEH codes. They can
+// 1) take no operands: SEH_Nop, SEH_PrologEnd, SEH_EpilogStart, SEH_EpilogEnd
+// 2) take an offset: SEH_StackAlloc, SEH_SaveFPLR, SEH_SaveFPLR_X
+// 3) take a register and an offset/size: all others
+void AArch64TargetWinCOFFStreamer::EmitARM64WinUnwindCode(unsigned UnwindCode,
+ int Reg,
+ int Offset) {
+ auto &S = getStreamer();
+ WinEH::FrameInfo *CurFrame = S.EnsureValidWinFrameInfo(SMLoc());
+ if (!CurFrame)
+ return;
+ MCSymbol *Label = S.EmitCFILabel();
+ auto Inst = WinEH::Instruction(UnwindCode, Label, Reg, Offset);
+ if (InEpilogCFI)
+ CurFrame->EpilogMap[CurrentEpilog].push_back(Inst);
+ else
+ CurFrame->Instructions.push_back(Inst);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFIAllocStack(unsigned Size) {
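+ // Pick the smallest SEH stack-alloc unwind code that can represent Size:
+ // alloc_s for sizes below 512 bytes, alloc_m below 16K, alloc_l otherwise.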
+ unsigned Op = Win64EH::UOP_AllocSmall;
+ if (Size >= 16384)
+ Op = Win64EH::UOP_AllocLarge;
+ else if (Size >= 512)
+ Op = Win64EH::UOP_AllocMedium;
+ EmitARM64WinUnwindCode(Op, -1, Size);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveFPLR(int Offset) {
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveFPLR, -1, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveFPLRX(int Offset) {
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveFPLRX, -1, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveReg(unsigned Reg,
+ int Offset) {
+ assert(Offset >= 0 && Offset <= 504 &&
+ "Offset for save reg should be >= 0 && <= 504");
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveReg, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveRegX(unsigned Reg,
+ int Offset) {
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveRegX, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveRegP(unsigned Reg,
+ int Offset) {
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveRegP, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveRegPX(unsigned Reg,
+ int Offset) {
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveRegPX, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveFReg(unsigned Reg,
+ int Offset) {
+ assert(Offset >= 0 && Offset <= 504 &&
+ "Offset for save freg should be >= 0 && <= 504");
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveFReg, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveFRegX(unsigned Reg,
+ int Offset) {
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveFRegX, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveFRegP(unsigned Reg,
+ int Offset) {
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveFRegP, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISaveFRegPX(unsigned Reg,
+ int Offset) {
+ EmitARM64WinUnwindCode(Win64EH::UOP_SaveFRegPX, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFISetFP() {
+ EmitARM64WinUnwindCode(Win64EH::UOP_SetFP, -1, 0);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFIAddFP(unsigned Offset) {
+ assert(Offset <= 2040 && "UOP_AddFP must have offset <= 2040");
+ EmitARM64WinUnwindCode(Win64EH::UOP_AddFP, -1, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFINop() {
+ EmitARM64WinUnwindCode(Win64EH::UOP_Nop, -1, 0);
+}
+
+// The functions below handle opcodes that can end up in either a prolog or
+// an epilog, but not both.
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFIPrologEnd() {
+ auto &S = getStreamer();
+ WinEH::FrameInfo *CurFrame = S.EnsureValidWinFrameInfo(SMLoc());
+ if (!CurFrame)
+ return;
+
+ MCSymbol *Label = S.EmitCFILabel();
+ CurFrame->PrologEnd = Label;
+ WinEH::Instruction Inst = WinEH::Instruction(Win64EH::UOP_End, Label, -1, 0);
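+ // Unwind codes for a prolog are written out in reverse order of the
+ // directives, so inserting UOP_End at the front makes it the terminating
+ // code of the emitted sequence.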
+ auto it = CurFrame->Instructions.begin();
+ CurFrame->Instructions.insert(it, Inst);
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFIEpilogStart() {
+ auto &S = getStreamer();
+ WinEH::FrameInfo *CurFrame = S.EnsureValidWinFrameInfo(SMLoc());
+ if (!CurFrame)
+ return;
+
+ InEpilogCFI = true;
+ CurrentEpilog = S.EmitCFILabel();
+}
+
+void AArch64TargetWinCOFFStreamer::EmitARM64WinCFIEpilogEnd() {
+ auto &S = getStreamer();
+ WinEH::FrameInfo *CurFrame = S.EnsureValidWinFrameInfo(SMLoc());
+ if (!CurFrame)
+ return;
+
+ InEpilogCFI = false;
+ MCSymbol *Label = S.EmitCFILabel();
+ WinEH::Instruction Inst = WinEH::Instruction(Win64EH::UOP_End, Label, -1, 0);
+ CurFrame->EpilogMap[CurrentEpilog].push_back(Inst);
+ CurrentEpilog = nullptr;
+}
+
+MCWinCOFFStreamer *createAArch64WinCOFFStreamer(
+ MCContext &Context, std::unique_ptr<MCAsmBackend> MAB,
+ std::unique_ptr<MCObjectWriter> OW, std::unique_ptr<MCCodeEmitter> Emitter,
+ bool RelaxAll, bool IncrementalLinkerCompatible) {
+ auto *S = new AArch64WinCOFFStreamer(Context, std::move(MAB),
+ std::move(Emitter), std::move(OW));
+ S->getAssembler().setIncrementalLinkerCompatible(IncrementalLinkerCompatible);
+ return S;
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.h
new file mode 100644
index 000000000000..8c0656652eed
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.h
@@ -0,0 +1,27 @@
+//===-- AArch64WinCOFFStreamer.h - WinCOFF Streamer for AArch64 -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements WinCOFF streamer information for the AArch64 backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64WINCOFFSTREAMER_H
+#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64WINCOFFSTREAMER_H
+
+#include "AArch64TargetStreamer.h"
+#include "llvm/MC/MCWinCOFFStreamer.h"
+
+namespace llvm {
+
+MCWinCOFFStreamer *createAArch64WinCOFFStreamer(
+ MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
+ std::unique_ptr<MCObjectWriter> OW, std::unique_ptr<MCCodeEmitter> Emitter,
+ bool RelaxAll, bool IncrementalLinkerCompatible);
+} // end namespace llvm
+
+#endif