diff options
author | Ed Schouten <ed@FreeBSD.org> | 2009-06-02 17:52:33 +0000 |
---|---|---|
committer | Ed Schouten <ed@FreeBSD.org> | 2009-06-02 17:52:33 +0000 |
commit | 009b1c42aa6266385f2c37e227516b24077e6dd7 (patch) | |
tree | 64ba909838c23261cace781ece27d106134ea451 /include/llvm/Target |
Diffstat (limited to 'include/llvm/Target')
23 files changed, 7524 insertions, 0 deletions
diff --git a/include/llvm/Target/DarwinTargetAsmInfo.h b/include/llvm/Target/DarwinTargetAsmInfo.h new file mode 100644 index 0000000000000..6241ffe29b8f2 --- /dev/null +++ b/include/llvm/Target/DarwinTargetAsmInfo.h @@ -0,0 +1,50 @@ +//===---- DarwinTargetAsmInfo.h - Darwin asm properties ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines target asm properties related what form asm statements +// should take in general on Darwin-based targets +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DARWIN_TARGET_ASM_INFO_H +#define LLVM_DARWIN_TARGET_ASM_INFO_H + +#include "llvm/Target/TargetAsmInfo.h" + +namespace llvm { + class GlobalValue; + class GlobalVariable; + class Type; + class Mangler; + + struct DarwinTargetAsmInfo: public TargetAsmInfo { + const Section* TextCoalSection; + const Section* ConstTextCoalSection; + const Section* ConstDataCoalSection; + const Section* ConstDataSection; + const Section* DataCoalSection; + const Section* FourByteConstantSection; + const Section* EightByteConstantSection; + const Section* SixteenByteConstantSection; + + explicit DarwinTargetAsmInfo(const TargetMachine &TM); + virtual const Section* SelectSectionForGlobal(const GlobalValue *GV) const; + virtual std::string UniqueSectionForGlobal(const GlobalValue* GV, + SectionKind::Kind kind) const; + virtual bool emitUsedDirectiveFor(const GlobalValue *GV, + Mangler *Mang) const; + const Section* MergeableConstSection(const GlobalVariable *GV) const; + const Section* MergeableConstSection(const Type *Ty) const; + const Section* MergeableStringSection(const GlobalVariable *GV) const; + const Section* SelectSectionForMachineConst(const Type *Ty) const; + }; +} + + +#endif // 
LLVM_DARWIN_TARGET_ASM_INFO_H diff --git a/include/llvm/Target/ELFTargetAsmInfo.h b/include/llvm/Target/ELFTargetAsmInfo.h new file mode 100644 index 0000000000000..6181e59a0553d --- /dev/null +++ b/include/llvm/Target/ELFTargetAsmInfo.h @@ -0,0 +1,45 @@ +//===---- ELFTargetAsmInfo.h - ELF asm properties ---------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines target asm properties related what form asm statements +// should take in general on ELF-based targets +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ELF_TARGET_ASM_INFO_H +#define LLVM_ELF_TARGET_ASM_INFO_H + +#include "llvm/Target/TargetAsmInfo.h" + +namespace llvm { + class GlobalValue; + class GlobalVariable; + class Type; + + struct ELFTargetAsmInfo: public TargetAsmInfo { + explicit ELFTargetAsmInfo(const TargetMachine &TM); + + SectionKind::Kind SectionKindForGlobal(const GlobalValue *GV) const; + virtual const Section* SelectSectionForGlobal(const GlobalValue *GV) const; + virtual std::string printSectionFlags(unsigned flags) const; + const Section* MergeableConstSection(const GlobalVariable *GV) const; + inline const Section* MergeableConstSection(const Type *Ty) const; + const Section* MergeableStringSection(const GlobalVariable *GV) const; + virtual const Section* + SelectSectionForMachineConst(const Type *Ty) const; + + const Section* DataRelSection; + const Section* DataRelLocalSection; + const Section* DataRelROSection; + const Section* DataRelROLocalSection; + }; +} + + +#endif // LLVM_ELF_TARGET_ASM_INFO_H diff --git a/include/llvm/Target/SubtargetFeature.h b/include/llvm/Target/SubtargetFeature.h new file mode 100644 index 0000000000000..5cfdc023d4399 --- /dev/null +++ 
b/include/llvm/Target/SubtargetFeature.h @@ -0,0 +1,114 @@ +//===-- llvm/Target/SubtargetFeature.h - CPU characteristics ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines and manages user or tool specified CPU characteristics. +// The intent is to be able to package specific features that should or should +// not be used on a specific target processor. A tool, such as llc, could, as +// as example, gather chip info from the command line, a long with features +// that should be used on that chip. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_SUBTARGETFEATURE_H +#define LLVM_TARGET_SUBTARGETFEATURE_H + +#include <string> +#include <vector> +#include <iosfwd> +#include <cstring> +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// +/// SubtargetFeatureKV - Used to provide key value pairs for feature and +/// CPU bit flags. +// +struct SubtargetFeatureKV { + const char *Key; // K-V key string + const char *Desc; // Help descriptor + uint32_t Value; // K-V integer value + uint32_t Implies; // K-V bit mask + + // Compare routine for std binary search + bool operator<(const SubtargetFeatureKV &S) const { + return strcmp(Key, S.Key) < 0; + } +}; + +//===----------------------------------------------------------------------===// +/// +/// SubtargetInfoKV - Used to provide key value pairs for CPU and arbitrary +/// pointers. 
+// +struct SubtargetInfoKV { + const char *Key; // K-V key string + void *Value; // K-V pointer value + + // Compare routine for std binary search + bool operator<(const SubtargetInfoKV &S) const { + return strcmp(Key, S.Key) < 0; + } +}; + +//===----------------------------------------------------------------------===// +/// +/// SubtargetFeatures - Manages the enabling and disabling of subtarget +/// specific features. Features are encoded as a string of the form +/// "cpu,+attr1,+attr2,-attr3,...,+attrN" +/// A comma separates each feature from the next (all lowercase.) +/// The first feature is always the CPU subtype (eg. pentiumm). If the CPU +/// value is "generic" then the CPU subtype should be generic for the target. +/// Each of the remaining features is prefixed with + or - indicating whether +/// that feature should be enabled or disabled contrary to the cpu +/// specification. +/// + +class SubtargetFeatures { + std::vector<std::string> Features; // Subtarget features as a vector +public: + explicit SubtargetFeatures(const std::string &Initial = std::string()); + + /// Features string accessors. + std::string getString() const; + void setString(const std::string &Initial); + + /// Set the CPU string. Replaces previous setting. Setting to "" clears CPU. + void setCPU(const std::string &String); + + /// Setting CPU string only if no string is set. + void setCPUIfNone(const std::string &String); + + /// Returns current CPU string. + const std::string & getCPU() const; + + /// Adding Features. + void AddFeature(const std::string &String, bool IsEnabled = true); + + /// Get feature bits. + uint32_t getBits(const SubtargetFeatureKV *CPUTable, + size_t CPUTableSize, + const SubtargetFeatureKV *FeatureTable, + size_t FeatureTableSize); + + /// Get info pointer + void *getInfo(const SubtargetInfoKV *Table, size_t TableSize); + + /// Print feature string. 
+ void print(std::ostream &OS) const; + void print(std::ostream *OS) const { if (OS) print(*OS); } + + // Dump feature info. + void dump() const; +}; + +} // End namespace llvm + +#endif diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td new file mode 100644 index 0000000000000..3f1cdd27ca394 --- /dev/null +++ b/include/llvm/Target/Target.td @@ -0,0 +1,507 @@ +//===- Target.td - Target Independent TableGen interface ---*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the target-independent interfaces which should be +// implemented by each target which is using a TableGen based code generator. +// +//===----------------------------------------------------------------------===// + +// Include all information about LLVM intrinsics. +include "llvm/Intrinsics.td" + +//===----------------------------------------------------------------------===// +// Register file description - These classes are used to fill in the target +// description classes. + +class RegisterClass; // Forward def + +// Register - You should define one instance of this class for each register +// in the target machine. String n will become the "name" of the register. +class Register<string n> { + string Namespace = ""; + string AsmName = n; + + // SpillSize - If this value is set to a non-zero value, it is the size in + // bits of the spill slot required to hold this register. If this value is + // set to zero, the information is inferred from any register classes the + // register belongs to. + int SpillSize = 0; + + // SpillAlignment - This value is used to specify the alignment required for + // spilling the register. Like SpillSize, this should only be explicitly + // specified if the register is not in a register class. 
+ int SpillAlignment = 0; + + // Aliases - A list of registers that this register overlaps with. A read or + // modification of this register can potentially read or modify the aliased + // registers. + list<Register> Aliases = []; + + // SubRegs - A list of registers that are parts of this register. Note these + // are "immediate" sub-registers and the registers within the list do not + // themselves overlap. e.g. For X86, EAX's SubRegs list contains only [AX], + // not [AX, AH, AL]. + list<Register> SubRegs = []; + + // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register. + // These values can be determined by locating the <target>.h file in the + // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The + // order of these names correspond to the enumeration used by gcc. A value of + // -1 indicates that the gcc number is undefined and -2 that register number + // is invalid for this mode/flavour. + list<int> DwarfNumbers = []; +} + +// RegisterWithSubRegs - This can be used to define instances of Register which +// need to specify sub-registers. +// List "subregs" specifies which registers are sub-registers to this one. This +// is used to populate the SubRegs and AliasSet fields of TargetRegisterDesc. +// This allows the code generator to be careful not to put two values with +// overlapping live ranges into registers which alias. +class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> { + let SubRegs = subregs; +} + +// SubRegSet - This can be used to define a specific mapping of registers to +// indices, for use as named subregs of a particular physical register. Each +// register in 'subregs' becomes an addressable subregister at index 'n' of the +// corresponding register in 'regs'. 
+class SubRegSet<int n, list<Register> regs, list<Register> subregs> { + int index = n; + + list<Register> From = regs; + list<Register> To = subregs; +} + +// RegisterClass - Now that all of the registers are defined, and aliases +// between registers are defined, specify which registers belong to which +// register classes. This also defines the default allocation order of +// registers by register allocators. +// +class RegisterClass<string namespace, list<ValueType> regTypes, int alignment, + list<Register> regList> { + string Namespace = namespace; + + // RegType - Specify the list ValueType of the registers in this register + // class. Note that all registers in a register class must have the same + // ValueTypes. This is a list because some targets permit storing different + // types in same register, for example vector values with 128-bit total size, + // but different count/size of items, like SSE on x86. + // + list<ValueType> RegTypes = regTypes; + + // Size - Specify the spill size in bits of the registers. A default value of + // zero lets tablgen pick an appropriate size. + int Size = 0; + + // Alignment - Specify the alignment required of the registers when they are + // stored or loaded to memory. + // + int Alignment = alignment; + + // CopyCost - This value is used to specify the cost of copying a value + // between two registers in this register class. The default value is one + // meaning it takes a single instruction to perform the copying. A negative + // value means copying is extremely expensive or impossible. + int CopyCost = 1; + + // MemberList - Specify which registers are in this class. If the + // allocation_order_* method are not specified, this also defines the order of + // allocation used by the register allocator. + // + list<Register> MemberList = regList; + + // SubClassList - Specify which register classes correspond to subregisters + // of this class. The order should be by subregister set index. 
+ list<RegisterClass> SubRegClassList = []; + + // MethodProtos/MethodBodies - These members can be used to insert arbitrary + // code into a generated register class. The normal usage of this is to + // overload virtual methods. + code MethodProtos = [{}]; + code MethodBodies = [{}]; +} + + +//===----------------------------------------------------------------------===// +// DwarfRegNum - This class provides a mapping of the llvm register enumeration +// to the register numbering used by gcc and gdb. These values are used by a +// debug information writer (ex. DwarfWriter) to describe where values may be +// located during execution. +class DwarfRegNum<list<int> Numbers> { + // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register. + // These values can be determined by locating the <target>.h file in the + // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The + // order of these names correspond to the enumeration used by gcc. A value of + // -1 indicates that the gcc number is undefined and -2 that register number is + // invalid for this mode/flavour. + list<int> DwarfNumbers = Numbers; +} + +//===----------------------------------------------------------------------===// +// Pull in the common support for scheduling +// +include "llvm/Target/TargetSchedule.td" + +class Predicate; // Forward def + +//===----------------------------------------------------------------------===// +// Instruction set description - These classes correspond to the C++ classes in +// the Target/TargetInstrInfo.h file. +// +class Instruction { + string Namespace = ""; + + dag OutOperandList; // An dag containing the MI def operand list. + dag InOperandList; // An dag containing the MI use operand list. + string AsmString = ""; // The .s format to print the instruction with. + + // Pattern - Set to the DAG pattern for this instruction, if we know of one, + // otherwise, uninitialized. 
+ list<dag> Pattern; + + // The follow state will eventually be inferred automatically from the + // instruction pattern. + + list<Register> Uses = []; // Default to using no non-operand registers + list<Register> Defs = []; // Default to modifying no non-operand registers + + // Predicates - List of predicates which will be turned into isel matching + // code. + list<Predicate> Predicates = []; + + // Code size. + int CodeSize = 0; + + // Added complexity passed onto matching pattern. + int AddedComplexity = 0; + + // These bits capture information about the high-level semantics of the + // instruction. + bit isReturn = 0; // Is this instruction a return instruction? + bit isBranch = 0; // Is this instruction a branch instruction? + bit isIndirectBranch = 0; // Is this instruction an indirect branch? + bit isBarrier = 0; // Can control flow fall through this instruction? + bit isCall = 0; // Is this instruction a call instruction? + bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand? + bit mayLoad = 0; // Is it possible for this inst to read memory? + bit mayStore = 0; // Is it possible for this inst to write memory? + bit isTwoAddress = 0; // Is this a two address instruction? + bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote? + bit isCommutable = 0; // Is this 3 operand instruction commutable? + bit isTerminator = 0; // Is this part of the terminator for a basic block? + bit isReMaterializable = 0; // Is this instruction re-materializable? + bit isPredicable = 0; // Is this instruction predicable? + bit hasDelaySlot = 0; // Does this instruction have an delay slot? + bit usesCustomDAGSchedInserter = 0; // Pseudo instr needing special help. + bit hasCtrlDep = 0; // Does this instruction r/w ctrl-flow chains? + bit isNotDuplicable = 0; // Is it unsafe to duplicate this instruction? + bit isAsCheapAsAMove = 0; // As cheap (or cheaper) than a move instruction. 
+ + // Side effect flags - When set, the flags have these meanings: + // + // hasSideEffects - The instruction has side effects that are not + // captured by any operands of the instruction or other flags. + // + // mayHaveSideEffects - Some instances of the instruction can have side + // effects. The virtual method "isReallySideEffectFree" is called to + // determine this. Load instructions are an example of where this is + // useful. In general, loads always have side effects. However, loads from + // constant pools don't. Individual back ends make this determination. + // + // neverHasSideEffects - Set on an instruction with no pattern if it has no + // side effects. + bit hasSideEffects = 0; + bit mayHaveSideEffects = 0; + bit neverHasSideEffects = 0; + + InstrItinClass Itinerary = NoItinerary;// Execution steps used for scheduling. + + string Constraints = ""; // OperandConstraint, e.g. $src = $dst. + + /// DisableEncoding - List of operand names (e.g. "$op1,$op2") that should not + /// be encoded into the output machineinstr. + string DisableEncoding = ""; +} + +/// Predicates - These are extra conditionals which are turned into instruction +/// selector matching code. Currently each predicate is just a string. +class Predicate<string cond> { + string CondString = cond; +} + +/// NoHonorSignDependentRounding - This predicate is true if support for +/// sign-dependent-rounding is not enabled. +def NoHonorSignDependentRounding + : Predicate<"!HonorSignDependentRoundingFPMath()">; + +class Requires<list<Predicate> preds> { + list<Predicate> Predicates = preds; +} + +/// ops definition - This is just a simple marker used to identify the operands +/// list for an instruction. outs and ins are identical both syntatically and +/// semantically, they are used to define def operands and use operands to +/// improve readibility. This should be used like this: +/// (outs R32:$dst), (ins R32:$src1, R32:$src2) or something similar. 
+def ops; +def outs; +def ins; + +/// variable_ops definition - Mark this instruction as taking a variable number +/// of operands. +def variable_ops; + +/// ptr_rc definition - Mark this operand as being a pointer value whose +/// register class is resolved dynamically via a callback to TargetInstrInfo. +/// FIXME: We should probably change this to a class which contain a list of +/// flags. But currently we have but one flag. +def ptr_rc; + +/// unknown definition - Mark this operand as being of unknown type, causing +/// it to be resolved by inference in the context it is used. +def unknown; + +/// Operand Types - These provide the built-in operand types that may be used +/// by a target. Targets can optionally provide their own operand types as +/// needed, though this should not be needed for RISC targets. +class Operand<ValueType ty> { + ValueType Type = ty; + string PrintMethod = "printOperand"; + dag MIOperandInfo = (ops); +} + +def i1imm : Operand<i1>; +def i8imm : Operand<i8>; +def i16imm : Operand<i16>; +def i32imm : Operand<i32>; +def i64imm : Operand<i64>; + +def f32imm : Operand<f32>; +def f64imm : Operand<f64>; + +/// zero_reg definition - Special node to stand for the zero register. +/// +def zero_reg; + +/// PredicateOperand - This can be used to define a predicate operand for an +/// instruction. OpTypes specifies the MIOperandInfo for the operand, and +/// AlwaysVal specifies the value of this predicate when set to "always +/// execute". +class PredicateOperand<ValueType ty, dag OpTypes, dag AlwaysVal> + : Operand<ty> { + let MIOperandInfo = OpTypes; + dag DefaultOps = AlwaysVal; +} + +/// OptionalDefOperand - This is used to define a optional definition operand +/// for an instruction. DefaultOps is the register the operand represents if none +/// is supplied, e.g. zero_reg. 
+class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops> + : Operand<ty> { + let MIOperandInfo = OpTypes; + dag DefaultOps = defaultops; +} + + +// InstrInfo - This class should only be instantiated once to provide parameters +// which are global to the the target machine. +// +class InstrInfo { + // If the target wants to associate some target-specific information with each + // instruction, it should provide these two lists to indicate how to assemble + // the target specific information into the 32 bits available. + // + list<string> TSFlagsFields = []; + list<int> TSFlagsShifts = []; + + // Target can specify its instructions in either big or little-endian formats. + // For instance, while both Sparc and PowerPC are big-endian platforms, the + // Sparc manual specifies its instructions in the format [31..0] (big), while + // PowerPC specifies them using the format [0..31] (little). + bit isLittleEndianEncoding = 0; +} + +// Standard Instructions. +def PHI : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops variable_ops); + let AsmString = "PHINODE"; + let Namespace = "TargetInstrInfo"; +} +def INLINEASM : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops variable_ops); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; +} +def DBG_LABEL : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops i32imm:$id); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let hasCtrlDep = 1; +} +def EH_LABEL : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops i32imm:$id); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let hasCtrlDep = 1; +} +def GC_LABEL : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops i32imm:$id); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let hasCtrlDep = 1; +} +def DECLARE : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops variable_ops); + let AsmString = ""; + let Namespace 
= "TargetInstrInfo"; + let hasCtrlDep = 1; +} +def EXTRACT_SUBREG : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops unknown:$supersrc, i32imm:$subidx); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; +} +def INSERT_SUBREG : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops unknown:$supersrc, unknown:$subsrc, i32imm:$subidx); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; + let Constraints = "$supersrc = $dst"; +} +def IMPLICIT_DEF : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; + let isReMaterializable = 1; + let isAsCheapAsAMove = 1; +} +def SUBREG_TO_REG : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops unknown:$implsrc, unknown:$subsrc, i32imm:$subidx); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; +} +def COPY_TO_REGCLASS : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops unknown:$src, i32imm:$regclass); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; + let isAsCheapAsAMove = 1; +} + +//===----------------------------------------------------------------------===// +// AsmWriter - This class can be implemented by targets that need to customize +// the format of the .s file writer. +// +// Subtargets can have multiple different asmwriters (e.g. AT&T vs Intel syntax +// on X86 for example). +// +class AsmWriter { + // AsmWriterClassName - This specifies the suffix to use for the asmwriter + // class. Generated AsmWriter classes are always prefixed with the target + // name. + string AsmWriterClassName = "AsmPrinter"; + + // InstFormatName - AsmWriters can specify the name of the format string to + // print instructions with. 
+ string InstFormatName = "AsmString"; + + // Variant - AsmWriters can be of multiple different variants. Variants are + // used to support targets that need to emit assembly code in ways that are + // mostly the same for different targets, but have minor differences in + // syntax. If the asmstring contains {|} characters in them, this integer + // will specify which alternative to use. For example "{x|y|z}" with Variant + // == 1, will expand to "y". + int Variant = 0; +} +def DefaultAsmWriter : AsmWriter; + + +//===----------------------------------------------------------------------===// +// Target - This class contains the "global" target information +// +class Target { + // InstructionSet - Instruction set description for this target. + InstrInfo InstructionSet; + + // AssemblyWriters - The AsmWriter instances available for this target. + list<AsmWriter> AssemblyWriters = [DefaultAsmWriter]; +} + +//===----------------------------------------------------------------------===// +// SubtargetFeature - A characteristic of the chip set. +// +class SubtargetFeature<string n, string a, string v, string d, + list<SubtargetFeature> i = []> { + // Name - Feature name. Used by command line (-mattr=) to determine the + // appropriate target chip. + // + string Name = n; + + // Attribute - Attribute to be set by feature. + // + string Attribute = a; + + // Value - Value the attribute to be set to by feature. + // + string Value = v; + + // Desc - Feature description. Used by command line (-mattr=) to display help + // information. + // + string Desc = d; + + // Implies - Features that this feature implies are present. If one of those + // features isn't set, then this one shouldn't be set either. + // + list<SubtargetFeature> Implies = i; +} + +//===----------------------------------------------------------------------===// +// Processor chip sets - These values represent each of the chip sets supported +// by the scheduler. 
Each Processor definition requires corresponding +// instruction itineraries. +// +class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> { + // Name - Chip set name. Used by command line (-mcpu=) to determine the + // appropriate target chip. + // + string Name = n; + + // ProcItin - The scheduling information for the target processor. + // + ProcessorItineraries ProcItin = pi; + + // Features - list of + list<SubtargetFeature> Features = f; +} + +//===----------------------------------------------------------------------===// +// Pull in the common support for calling conventions. +// +include "llvm/Target/TargetCallingConv.td" + +//===----------------------------------------------------------------------===// +// Pull in the common support for DAG isel generation. +// +include "llvm/Target/TargetSelectionDAG.td" diff --git a/include/llvm/Target/TargetAsmInfo.h b/include/llvm/Target/TargetAsmInfo.h new file mode 100644 index 0000000000000..f223f4765f988 --- /dev/null +++ b/include/llvm/Target/TargetAsmInfo.h @@ -0,0 +1,932 @@ +//===-- llvm/Target/TargetAsmInfo.h - Asm info ------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains a class to be used as the basis for target specific +// asm writers. This class primarily takes care of global printing constants, +// which are used in very similar ways across all targets. 
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_ASM_INFO_H
#define LLVM_TARGET_ASM_INFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/DataTypes.h"
#include <string>

namespace llvm {
  // DWARF encoding query type: identifies what kind of entity a DWARF
  // pointer-encoding is being requested for (see PreferredEHDataFormat).
  namespace DwarfEncoding {
    enum Target {
      Data = 0,        // Pointers to data.
      CodeLabels = 1,  // Pointers to code labels.
      Functions = 2    // Pointers to functions.
    };
  }

  /// SectionKind - Broad classification of what a section holds, used when
  /// deciding which output section a global should be placed in.
  namespace SectionKind {
    enum Kind {
      Unknown = 0,      ///< Custom section
      Text,             ///< Text section
      Data,             ///< Data section
      DataRel,          ///< Contains data that has relocations
      DataRelLocal,     ///< Contains data that has only local relocations
      BSS,              ///< BSS section
      ROData,           ///< Readonly data section
      DataRelRO,        ///< Contains data that is otherwise readonly
      DataRelROLocal,   ///< Contains r/o data with only local relocations
      RODataMergeStr,   ///< Readonly data section (mergeable strings)
      RODataMergeConst, ///< Readonly data section (mergeable constants)
      SmallData,        ///< Small data section
      SmallBSS,         ///< Small bss section
      SmallROData,      ///< Small readonly section
      ThreadData,       ///< Initialized TLS data objects
      ThreadBSS         ///< Uninitialized TLS data objects
    };

    /// isReadOnly - True for the kinds that may be emitted to a read-only
    /// section (plain, mergeable, or small read-only data).
    static inline bool isReadOnly(Kind K) {
      return (K == SectionKind::ROData ||
              K == SectionKind::RODataMergeConst ||
              K == SectionKind::RODataMergeStr ||
              K == SectionKind::SmallROData);
    }

    /// isBSS - True for the zero-initialized (bss-style) kinds.
    static inline bool isBSS(Kind K) {
      return (K == SectionKind::BSS ||
              K == SectionKind::SmallBSS);
    }
  }

  /// SectionFlags - Bit flags describing properties of an output section.
  /// The low bits hold combinable type flags; the top byte (EntitySize)
  /// holds the entity size for mergeable sections.
  namespace SectionFlags {
    const unsigned Invalid    = -1U;
    const unsigned None       = 0;
    const unsigned Code       = 1 << 0;  ///< Section contains code
    const unsigned Writeable  = 1 << 1;  ///< Section is writeable
    const unsigned BSS        = 1 << 2;  ///< Section contains only zeroes
    const unsigned Mergeable  = 1 << 3;  ///< Section contains mergeable data
    const unsigned Strings    = 1 << 4;  ///< Section contains C-type strings
    const unsigned TLS        = 1 << 5;  ///< Section contains thread-local data
    const unsigned Debug      = 1 << 6;  ///< Section contains debug data
    const unsigned Linkonce   = 1 << 7;  ///< Section is linkonce
    const unsigned Small      = 1 << 8;  ///< Section is small
    // NOTE(review): TypeFlags (0xFF) covers only bits 0-7 and therefore
    // excludes the Small bit (1 << 8) -- confirm this is intentional.
    const unsigned TypeFlags  = 0xFF;
    // Some gap for future flags
    const unsigned Named      = 1 << 23;    ///< Section is named
    const unsigned EntitySize = 0xFF << 24; ///< Entity size for mergeable stuff

    /// getEntitySize - Extract the mergeable-entity size stored in the top
    /// byte of Flags.
    static inline unsigned getEntitySize(unsigned Flags) {
      return (Flags >> 24) & 0xFF;
    }

    /// setEntitySize - Return Flags with its entity-size byte replaced by
    /// Size (truncated to 8 bits).
    static inline unsigned setEntitySize(unsigned Flags, unsigned Size) {
      return ((Flags & ~EntitySize) | ((Size & 0xFF) << 24));
    }

    /// KeyInfo - DenseMap key traits for a flags word.  Invalid and
    /// Invalid-1 are reserved as the empty/tombstone keys.
    struct KeyInfo {
      static inline unsigned getEmptyKey() { return Invalid; }
      static inline unsigned getTombstoneKey() { return Invalid - 1; }
      static unsigned getHashValue(const unsigned &Key) { return Key; }
      static bool isEqual(unsigned LHS, unsigned RHS) { return LHS == RHS; }
      static bool isPod() { return true; }
    };

    /// FlagsStringsMapType - Maps a flags word to its printed string form
    /// (see TargetAsmInfo::getSectionFlags).
    typedef DenseMap<unsigned, std::string, KeyInfo> FlagsStringsMapType;
  }

  class TargetMachine;
  class CallInst;
  class GlobalValue;
  class Type;
  class Mangler;

  /// Section - An output section (named or unnamed) together with its
  /// SectionFlags.  Instances are created and uniqued only through
  /// TargetAsmInfo (the constructor is private; see the friends below).
  class Section {
    friend class TargetAsmInfo;
    friend class StringMapEntry<Section>;
    friend class StringMap<Section>;

    std::string Name;
    unsigned Flags;
    explicit Section(unsigned F = SectionFlags::Invalid) : Flags(F) { }

  public:

    bool isNamed() const { return Flags & SectionFlags::Named; }
    // Same extraction as SectionFlags::getEntitySize(Flags).
    unsigned getEntitySize() const { return (Flags >> 24) & 0xFF; }

    const std::string& getName() const { return Name; }
    unsigned getFlags() const { return Flags; }
  };

  /// TargetAsmInfo - This class is intended to be used as a base class for asm
  /// properties and features specific to the target.
  class TargetAsmInfo {
  private:
    mutable StringMap<Section> Sections;  // Uniqued named sections.
    mutable SectionFlags::FlagsStringsMapType FlagsStrings;
    void fillDefaultValues();
  protected:
    /// TM - The current TargetMachine.
    const TargetMachine &TM;

    //===------------------------------------------------------------------===//
    // Properties to be set by the target writer, used to configure asm printer.
    //

    /// TextSection - Section directive for standard text.
    ///
    const Section *TextSection;           // Defaults to ".text".

    /// DataSection - Section directive for standard data.
    ///
    const Section *DataSection;           // Defaults to ".data".

    /// BSSSection - Section directive for uninitialized data.  Null if this
    /// target doesn't support a BSS section.
    ///
    const char *BSSSection;               // Default to ".bss".
    const Section *BSSSection_;

    /// ReadOnlySection - This is the directive that is emitted to switch to a
    /// read-only section for constant data (e.g. data declared const,
    /// jump tables).
    const Section *ReadOnlySection;       // Defaults to NULL

    /// SmallDataSection - This is the directive that is emitted to switch to a
    /// small data section.
    ///
    const Section *SmallDataSection;      // Defaults to NULL

    /// SmallBSSSection - This is the directive that is emitted to switch to a
    /// small bss section.
    ///
    const Section *SmallBSSSection;       // Defaults to NULL

    /// SmallRODataSection - This is the directive that is emitted to switch to
    /// a small read-only data section.
    ///
    const Section *SmallRODataSection;    // Defaults to NULL

    /// TLSDataSection - Section directive for Thread Local data.
    ///
    const Section *TLSDataSection;        // Defaults to ".tdata".

    /// TLSBSSSection - Section directive for Thread Local uninitialized data.
    /// Null if this target doesn't support a BSS section.
    ///
    const Section *TLSBSSSection;         // Defaults to ".tbss".

    /// ZeroFillDirective - Directive for emitting a global to the ZeroFill
    /// section on this target.  Null if this target doesn't support zerofill.
    const char *ZeroFillDirective;        // Default is null.

    /// NonexecutableStackDirective - Directive for declaring to the
    /// linker and beyond that the emitted code does not require stack
    /// memory to be executable.
    const char *NonexecutableStackDirective; // Default is null.

    /// NeedsSet - True if target asm treats expressions in data directives
    /// as linktime-relocatable.  For assembly-time computation, we need to
    /// use a .set.  Thus:
    ///   .set w, x-y
    ///   .long w
    /// is computed at assembly time, while
    ///   .long x-y
    /// is relocated if the relative locations of x and y change at linktime.
    /// We want both these things in different places.
    bool NeedsSet;                        // Defaults to false.

    /// MaxInstLength - This is the maximum possible length of an instruction,
    /// which is needed to compute the size of an inline asm.
    unsigned MaxInstLength;               // Defaults to 4.

    /// PCSymbol - The symbol used to represent the current PC.  Used in PC
    /// relative expressions.
    const char *PCSymbol;                 // Defaults to "$".

    /// SeparatorChar - This character, if specified, is used to separate
    /// instructions from each other when on the same line.  This is used to
    /// measure inline asm instructions.
    char SeparatorChar;                   // Defaults to ';'

    /// CommentString - This indicates the comment character used by the
    /// assembler.
    const char *CommentString;            // Defaults to "#"

    /// GlobalPrefix - If this is set to a non-empty string, it is prepended
    /// onto all global symbols.  This is often used for "_" or ".".
    const char *GlobalPrefix;             // Defaults to ""

    /// PrivateGlobalPrefix - This prefix is used for globals like constant
    /// pool entries that are completely private to the .s file and should not
    /// have names in the .o file.  This is often "." or "L".
    const char *PrivateGlobalPrefix;      // Defaults to "."

    /// LessPrivateGlobalPrefix - This prefix is used for symbols that should
    /// be passed through the assembler but be removed by the linker.  This
    /// is "l" on Darwin, currently used for some ObjC metadata.
    const char *LessPrivateGlobalPrefix;  // Defaults to ""

    /// JumpTableSpecialLabelPrefix - If not null, a extra (dead) label is
    /// emitted before jump tables with the specified prefix.
    const char *JumpTableSpecialLabelPrefix; // Default to null.

    /// GlobalVarAddrPrefix/Suffix - If these are nonempty, these strings
    /// will enclose any GlobalVariable (that isn't a function)
    ///
    const char *GlobalVarAddrPrefix;      // Defaults to ""
    const char *GlobalVarAddrSuffix;      // Defaults to ""

    /// FunctionAddrPrefix/Suffix - If these are nonempty, these strings
    /// will enclose any GlobalVariable that points to a function.
    /// For example, this is used by the IA64 backend to materialize
    /// function descriptors, by decorating the ".data8" object with the
    /// @verbatim @fptr( ) @endverbatim
    /// link-relocation operator.
    ///
    const char *FunctionAddrPrefix;       // Defaults to ""
    const char *FunctionAddrSuffix;       // Defaults to ""

    /// PersonalityPrefix/Suffix - If these are nonempty, these strings will
    /// enclose any personality function in the common frame section.
    ///
    const char *PersonalityPrefix;        // Defaults to ""
    const char *PersonalitySuffix;        // Defaults to ""

    /// NeedsIndirectEncoding - If set, we need to set the indirect encoding bit
    /// for EH in Dwarf.
    ///
    bool NeedsIndirectEncoding;           // Defaults to false

    /// InlineAsmStart/End - If these are nonempty, they contain a directive to
    /// emit before and after an inline assembly statement.
    const char *InlineAsmStart;           // Defaults to "#APP\n"
    const char *InlineAsmEnd;             // Defaults to "#NO_APP\n"

    /// AssemblerDialect - Which dialect of an assembler variant to use.
    unsigned AssemblerDialect;            // Defaults to 0

    /// StringConstantPrefix - Prefix for FEs to use when generating unnamed
    /// constant strings.  These names get run through the Mangler later; if
    /// you want the Mangler not to add the GlobalPrefix as well,
    /// use '\1' as the first character.
    const char *StringConstantPrefix;     // Defaults to ".str"

    //===--- Data Emission Directives -------------------------------------===//

    /// ZeroDirective - this should be set to the directive used to get some
    /// number of zero bytes emitted to the current section.  Common cases are
    /// "\t.zero\t" and "\t.space\t".  If this is set to null, the
    /// Data*bitsDirective's will be used to emit zero bytes.
    const char *ZeroDirective;            // Defaults to "\t.zero\t"
    const char *ZeroDirectiveSuffix;      // Defaults to ""

    /// AsciiDirective - This directive allows emission of an ascii string with
    /// the standard C escape characters embedded into it.
    const char *AsciiDirective;           // Defaults to "\t.ascii\t"

    /// AscizDirective - If not null, this allows for special handling of
    /// zero terminated strings on this target.  This is commonly supported as
    /// ".asciz".  If a target doesn't support this, it can be set to null.
    const char *AscizDirective;           // Defaults to "\t.asciz\t"

    /// DataDirectives - These directives are used to output some unit of
    /// integer data to the current section.  If a data directive is set to
    /// null, smaller data directives will be used to emit the large sizes.
    const char *Data8bitsDirective;       // Defaults to "\t.byte\t"
    const char *Data16bitsDirective;      // Defaults to "\t.short\t"
    const char *Data32bitsDirective;      // Defaults to "\t.long\t"
    const char *Data64bitsDirective;      // Defaults to "\t.quad\t"

    /// getASDirective - Targets can override it to provide different data
    /// directives for various sizes and non-default address spaces.
    virtual const char *getASDirective(unsigned size,
                                       unsigned AS) const {
      // Only meaningful for non-default address spaces; this default
      // implementation supports none.
      assert (AS > 0
              && "Dont know the directives for default addr space");
      return NULL;
    }

    //===--- Alignment Information ----------------------------------------===//

    /// AlignDirective - The directive used to emit round up to an alignment
    /// boundary.
    ///
    const char *AlignDirective;           // Defaults to "\t.align\t"

    /// AlignmentIsInBytes - If this is true (the default) then the asmprinter
    /// emits ".align N" directives, where N is the number of bytes to align to.
    /// Otherwise, it emits ".align log2(N)", e.g. 3 to align to an 8 byte
    /// boundary.
    bool AlignmentIsInBytes;              // Defaults to true

    /// TextAlignFillValue - If non-zero, this is used to fill the executable
    /// space created as the result of a alignment directive.
    unsigned TextAlignFillValue;

    //===--- Section Switching Directives ---------------------------------===//

    /// SwitchToSectionDirective - This is the directive used when we want to
    /// emit a global to an arbitrary section.  The section name is emited after
    /// this.
    const char *SwitchToSectionDirective; // Defaults to "\t.section\t"

    /// TextSectionStartSuffix - This is printed after each start of section
    /// directive for text sections.
    const char *TextSectionStartSuffix;   // Defaults to "".

    /// DataSectionStartSuffix - This is printed after each start of section
    /// directive for data sections.
    const char *DataSectionStartSuffix;   // Defaults to "".

    /// SectionEndDirectiveSuffix - If non-null, the asm printer will close each
    /// section with the section name and this suffix printed.
    const char *SectionEndDirectiveSuffix; // Defaults to null.

    /// ConstantPoolSection - This is the section that we SwitchToSection right
    /// before emitting the constant pool for a function.
    const char *ConstantPoolSection;      // Defaults to "\t.section .rodata"

    /// JumpTableDataSection - This is the section that we SwitchToSection right
    /// before emitting the jump tables for a function when the relocation model
    /// is not PIC.
    const char *JumpTableDataSection;     // Defaults to "\t.section .rodata"

    /// JumpTableDirective - if non-null, the directive to emit before a jump
    /// table.
    const char *JumpTableDirective;

    /// CStringSection - If not null, this allows for special handling of
    /// cstring constants (null terminated string that does not contain any
    /// other null bytes) on this target.  This is commonly supported as
    /// ".cstring".
    const char *CStringSection;           // Defaults to NULL
    const Section *CStringSection_;

    /// StaticCtorsSection - This is the directive that is emitted to switch to
    /// a section to emit the static constructor list.
    /// Defaults to "\t.section .ctors,\"aw\",@progbits".
    const char *StaticCtorsSection;

    /// StaticDtorsSection - This is the directive that is emitted to switch to
    /// a section to emit the static destructor list.
    /// Defaults to "\t.section .dtors,\"aw\",@progbits".
    const char *StaticDtorsSection;

    //===--- Global Variable Emission Directives --------------------------===//

    /// GlobalDirective - This is the directive used to declare a global entity.
    ///
    const char *GlobalDirective;          // Defaults to NULL.

    /// ExternDirective - This is the directive used to declare external
    /// globals.
    ///
    const char *ExternDirective;          // Defaults to NULL.

    /// SetDirective - This is the name of a directive that can be used to tell
    /// the assembler to set the value of a variable to some expression.
    const char *SetDirective;             // Defaults to null.

    /// LCOMMDirective - This is the name of a directive (if supported) that can
    /// be used to efficiently declare a local (internal) block of zero
    /// initialized data in the .bss/.data section.  The syntax expected is:
    /// @verbatim <LCOMMDirective> SYMBOLNAME LENGTHINBYTES, ALIGNMENT
    /// @endverbatim
    const char *LCOMMDirective;           // Defaults to null.

    const char *COMMDirective;            // Defaults to "\t.comm\t".

    /// COMMDirectiveTakesAlignment - True if COMMDirective take a third
    /// argument that specifies the alignment of the declaration.
    bool COMMDirectiveTakesAlignment;     // Defaults to true.

    /// HasDotTypeDotSizeDirective - True if the target has .type and .size
    /// directives, this is true for most ELF targets.
    bool HasDotTypeDotSizeDirective;      // Defaults to true.

    /// HasSingleParameterDotFile - True if the target has a single parameter
    /// .file directive, this is true for ELF targets.
    bool HasSingleParameterDotFile;       // Defaults to true.

    /// UsedDirective - This directive, if non-null, is used to declare a global
    /// as being used somehow that the assembler can't see.  This prevents dead
    /// code elimination on some targets.
    const char *UsedDirective;            // Defaults to null.

    /// WeakRefDirective - This directive, if non-null, is used to declare a
    /// global as being a weak undefined symbol.
    const char *WeakRefDirective;         // Defaults to null.

    /// WeakDefDirective - This directive, if non-null, is used to declare a
    /// global as being a weak defined symbol.
    const char *WeakDefDirective;         // Defaults to null.

    /// HiddenDirective - This directive, if non-null, is used to declare a
    /// global or function as having hidden visibility.
    const char *HiddenDirective;          // Defaults to "\t.hidden\t".

    /// ProtectedDirective - This directive, if non-null, is used to declare a
    /// global or function as having protected visibility.
    const char *ProtectedDirective;       // Defaults to "\t.protected\t".

    //===--- Dwarf Emission Directives -----------------------------------===//

    /// AbsoluteDebugSectionOffsets - True if we should emit absolute section
    /// offsets for debug information.  Defaults to false.
    bool AbsoluteDebugSectionOffsets;

    /// AbsoluteEHSectionOffsets - True if we should emit absolute section
    /// offsets for EH information.  Defaults to false.
    bool AbsoluteEHSectionOffsets;

    /// HasLEB128 - True if target asm supports leb128 directives.
    ///
    bool HasLEB128;                       // Defaults to false.

    /// HasDotLocAndDotFile - True if target asm supports .loc and .file
    /// directives for emitting debugging information.
    ///
    bool HasDotLocAndDotFile;             // Defaults to false.

    /// SupportsDebugInformation - True if target supports emission of debugging
    /// information.
    bool SupportsDebugInformation;

    /// SupportsExceptionHandling - True if target supports
    /// exception handling.
    ///
    bool SupportsExceptionHandling;       // Defaults to false.

    /// DwarfRequiresFrameSection - True if the Dwarf2 output needs a frame
    /// section.
    bool DwarfRequiresFrameSection;       // Defaults to true.

    /// DwarfUsesInlineInfoSection - True if DwarfDebugInlineSection is used to
    /// encode inline subroutine information.
    bool DwarfUsesInlineInfoSection;      // Defaults to false.

    /// SupportsMacInfoSection - True if the Dwarf output supports macro
    /// information.
    bool SupportsMacInfoSection;          // Defaults to true

    /// NonLocalEHFrameLabel - If set, the EH_frame label needs to be non-local.
    ///
    bool NonLocalEHFrameLabel;            // Defaults to false.

    /// GlobalEHDirective - This is the directive used to make exception frame
    /// tables globally visible.
    ///
    const char *GlobalEHDirective;        // Defaults to NULL.

    /// SupportsWeakOmittedEHFrame - True if target assembler and linker will
    /// handle a weak_definition of constant 0 for an omitted EH frame.
    bool SupportsWeakOmittedEHFrame;      // Defaults to true.

    /// DwarfSectionOffsetDirective - Special section offset directive.
    const char* DwarfSectionOffsetDirective; // Defaults to NULL

    /// DwarfAbbrevSection - Section directive for Dwarf abbrev.
    ///
    const char *DwarfAbbrevSection;       // Defaults to ".debug_abbrev".

    /// DwarfInfoSection - Section directive for Dwarf info.
    ///
    const char *DwarfInfoSection;         // Defaults to ".debug_info".

    /// DwarfLineSection - Section directive for DWARF line number info.
    ///
    const char *DwarfLineSection;         // Defaults to ".debug_line".

    /// DwarfFrameSection - Section directive for DWARF call frame info.
    ///
    const char *DwarfFrameSection;        // Defaults to ".debug_frame".

    /// DwarfPubNamesSection - Section directive for DWARF public names.
    ///
    const char *DwarfPubNamesSection;     // Defaults to ".debug_pubnames".

    /// DwarfPubTypesSection - Section directive for DWARF public types.
    ///
    const char *DwarfPubTypesSection;     // Defaults to ".debug_pubtypes".

    /// DwarfDebugInlineSection - Section directive for inline info.
    ///
    const char *DwarfDebugInlineSection;  // Defaults to ".debug_inlined"

    /// DwarfStrSection - Section directive for the DWARF string table.
    ///
    const char *DwarfStrSection;          // Defaults to ".debug_str".

    /// DwarfLocSection - Section directive for DWARF location lists.
    ///
    const char *DwarfLocSection;          // Defaults to ".debug_loc".

    /// DwarfARangesSection - Section directive for DWARF address ranges.
    ///
    const char *DwarfARangesSection;      // Defaults to ".debug_aranges".

    /// DwarfRangesSection - Section directive for DWARF range lists.
    ///
    const char *DwarfRangesSection;       // Defaults to ".debug_ranges".

    /// DwarfMacInfoSection - Section directive for DWARF macro info.
    ///
    const char *DwarfMacInfoSection;      // Defaults to ".debug_macinfo".

    /// DwarfEHFrameSection - Section directive for Exception frames.
    ///
    const char *DwarfEHFrameSection;      // Defaults to ".eh_frame".

    /// DwarfExceptionSection - Section directive for Exception table.
    ///
    const char *DwarfExceptionSection;    // Defaults to ".gcc_except_table".

    //===--- CBE Asm Translation Table -----------------------------------===//

    const char *const *AsmTransCBE;       // Defaults to empty

  public:
    explicit TargetAsmInfo(const TargetMachine &TM);
    virtual ~TargetAsmInfo();

    /// getNamedSection/getUnnamedSection - Look up (creating and uniquing on
    /// first use) the Section for the given name/directive and flags.
    // NOTE(review): Override semantics are defined in the implementation
    // file -- presumably it replaces previously-recorded flags; confirm.
    const Section* getNamedSection(const char *Name,
                                   unsigned Flags = SectionFlags::None,
                                   bool Override = false) const;
    const Section* getUnnamedSection(const char *Directive,
                                     unsigned Flags = SectionFlags::None,
                                     bool Override = false) const;

    /// Measure the specified inline asm to determine an approximation of its
    /// length.
    virtual unsigned getInlineAsmLength(const char *Str) const;

    /// ExpandInlineAsm - This hook allows the target to expand an inline asm
    /// call to be explicit llvm code if it wants to.  This is useful for
    /// turning simple inline asms into LLVM intrinsics, which gives the
    /// compiler more information about the behavior of the code.
    virtual bool ExpandInlineAsm(CallInst *CI) const {
      return false;
    }

    /// emitUsedDirectiveFor - This hook allows targets to selectively decide
    /// not to emit the UsedDirective for some symbols in llvm.used.
    virtual bool emitUsedDirectiveFor(const GlobalValue *GV,
                                      Mangler *Mang) const {
      // Default: emit for any non-null global.
      return (GV!=0);
    }

    /// PreferredEHDataFormat - This hook allows the target to select data
    /// format used for encoding pointers in exception handling data.  Reason is
    /// 0 for data, 1 for code labels, 2 for function pointers.  Global is true
    /// if the symbol can be relocated.
    virtual unsigned PreferredEHDataFormat(DwarfEncoding::Target Reason,
                                           bool Global) const;

    /// SectionKindForGlobal - This hook allows the target to select proper
    /// section kind used for global emission.
    virtual SectionKind::Kind
    SectionKindForGlobal(const GlobalValue *GV) const;

    /// RelocBehaviour - Describes how relocations should be treated when
    /// selecting sections.  Reloc::Global bit should be set if global
    /// relocations should force object to be placed in read-write
    /// sections.  Reloc::Local bit should be set if local relocations should
    /// force object to be placed in read-write sections.
    virtual unsigned RelocBehaviour() const;

    /// SectionFlagsForGlobal - This hook allows the target to select proper
    /// section flags either for given global or for section.
    virtual unsigned
    SectionFlagsForGlobal(const GlobalValue *GV = NULL,
                          const char* name = NULL) const;

    /// SectionForGlobal - This hooks returns proper section name for given
    /// global with all necessary flags and marks.
    virtual const Section* SectionForGlobal(const GlobalValue *GV) const;

    // Helper methods for SectionForGlobal
    virtual std::string UniqueSectionForGlobal(const GlobalValue* GV,
                                               SectionKind::Kind kind) const;

    /// getSectionFlags - Return (caching in FlagsStrings) the printed string
    /// form of the given flags word.
    const std::string& getSectionFlags(unsigned Flags) const;
    virtual std::string printSectionFlags(unsigned flags) const { return ""; }

    virtual const Section* SelectSectionForGlobal(const GlobalValue *GV) const;

    virtual const Section* SelectSectionForMachineConst(const Type *Ty) const;

    /// getSLEB128Size - Compute the number of bytes required for a signed
    /// leb128 value.
    static unsigned getSLEB128Size(int Value);

    /// getULEB128Size - Compute the number of bytes required for an unsigned
    /// leb128 value.
    static unsigned getULEB128Size(unsigned Value);

    // Data directive accessors.  Address space 0 uses the plain directive;
    // other address spaces are delegated to the target via getASDirective.
    //
    const char *getData8bitsDirective(unsigned AS = 0) const {
      return AS == 0 ? Data8bitsDirective : getASDirective(8, AS);
    }
    const char *getData16bitsDirective(unsigned AS = 0) const {
      return AS == 0 ? Data16bitsDirective : getASDirective(16, AS);
    }
    const char *getData32bitsDirective(unsigned AS = 0) const {
      return AS == 0 ? Data32bitsDirective : getASDirective(32, AS);
    }
    const char *getData64bitsDirective(unsigned AS = 0) const {
      return AS == 0 ? Data64bitsDirective : getASDirective(64, AS);
    }


    // Accessors.
    //
    const Section *getTextSection() const {
      return TextSection;
    }
    const Section *getDataSection() const {
      return DataSection;
    }
    const char *getBSSSection() const {
      return BSSSection;
    }
    const Section *getBSSSection_() const {
      return BSSSection_;
    }
    const Section *getReadOnlySection() const {
      return ReadOnlySection;
    }
    const Section *getSmallDataSection() const {
      return SmallDataSection;
    }
    const Section *getSmallBSSSection() const {
      return SmallBSSSection;
    }
    const Section *getSmallRODataSection() const {
      return SmallRODataSection;
    }
    const Section *getTLSDataSection() const {
      return TLSDataSection;
    }
    const Section *getTLSBSSSection() const {
      return TLSBSSSection;
    }
    const char *getZeroFillDirective() const {
      return ZeroFillDirective;
    }
    const char *getNonexecutableStackDirective() const {
      return NonexecutableStackDirective;
    }
    bool needsSet() const {
      return NeedsSet;
    }
    const char *getPCSymbol() const {
      return PCSymbol;
    }
    char getSeparatorChar() const {
      return SeparatorChar;
    }
    const char *getCommentString() const {
      return CommentString;
    }
    const char *getGlobalPrefix() const {
      return GlobalPrefix;
    }
    const char *getPrivateGlobalPrefix() const {
      return PrivateGlobalPrefix;
    }
    /// EHGlobalPrefix - Prefix for EH_frame and the .eh symbols.
    /// This is normally PrivateGlobalPrefix, but some targets want
    /// these symbols to be visible.
    virtual const char *getEHGlobalPrefix() const {
      return PrivateGlobalPrefix;
    }
    const char *getLessPrivateGlobalPrefix() const {
      return LessPrivateGlobalPrefix;
    }
    const char *getJumpTableSpecialLabelPrefix() const {
      return JumpTableSpecialLabelPrefix;
    }
    const char *getGlobalVarAddrPrefix() const {
      return GlobalVarAddrPrefix;
    }
    const char *getGlobalVarAddrSuffix() const {
      return GlobalVarAddrSuffix;
    }
    const char *getFunctionAddrPrefix() const {
      return FunctionAddrPrefix;
    }
    const char *getFunctionAddrSuffix() const {
      return FunctionAddrSuffix;
    }
    const char *getPersonalityPrefix() const {
      return PersonalityPrefix;
    }
    const char *getPersonalitySuffix() const {
      return PersonalitySuffix;
    }
    bool getNeedsIndirectEncoding() const {
      return NeedsIndirectEncoding;
    }
    const char *getInlineAsmStart() const {
      return InlineAsmStart;
    }
    const char *getInlineAsmEnd() const {
      return InlineAsmEnd;
    }
    unsigned getAssemblerDialect() const {
      return AssemblerDialect;
    }
    const char *getStringConstantPrefix() const {
      return StringConstantPrefix;
    }
    const char *getZeroDirective() const {
      return ZeroDirective;
    }
    const char *getZeroDirectiveSuffix() const {
      return ZeroDirectiveSuffix;
    }
    const char *getAsciiDirective() const {
      return AsciiDirective;
    }
    const char *getAscizDirective() const {
      return AscizDirective;
    }
    const char *getJumpTableDirective() const {
      return JumpTableDirective;
    }
    const char *getAlignDirective() const {
      return AlignDirective;
    }
    bool getAlignmentIsInBytes() const {
      return AlignmentIsInBytes;
    }
    unsigned getTextAlignFillValue() const {
      return TextAlignFillValue;
    }
    const char *getSwitchToSectionDirective() const {
      return SwitchToSectionDirective;
    }
    const char *getTextSectionStartSuffix() const {
      return TextSectionStartSuffix;
    }
    const char *getDataSectionStartSuffix() const {
      return DataSectionStartSuffix;
    }
    const char *getSectionEndDirectiveSuffix() const {
      return SectionEndDirectiveSuffix;
    }
    const char *getConstantPoolSection() const {
      return ConstantPoolSection;
    }
    const char *getJumpTableDataSection() const {
      return JumpTableDataSection;
    }
    const char *getCStringSection() const {
      return CStringSection;
    }
    const Section *getCStringSection_() const {
      return CStringSection_;
    }
    const char *getStaticCtorsSection() const {
      return StaticCtorsSection;
    }
    const char *getStaticDtorsSection() const {
      return StaticDtorsSection;
    }
    const char *getGlobalDirective() const {
      return GlobalDirective;
    }
    const char *getExternDirective() const {
      return ExternDirective;
    }
    const char *getSetDirective() const {
      return SetDirective;
    }
    const char *getLCOMMDirective() const {
      return LCOMMDirective;
    }
    const char *getCOMMDirective() const {
      return COMMDirective;
    }
    bool getCOMMDirectiveTakesAlignment() const {
      return COMMDirectiveTakesAlignment;
    }
    bool hasDotTypeDotSizeDirective() const {
      return HasDotTypeDotSizeDirective;
    }
    bool hasSingleParameterDotFile() const {
      return HasSingleParameterDotFile;
    }
    const char *getUsedDirective() const {
      return UsedDirective;
    }
    const char *getWeakRefDirective() const {
      return WeakRefDirective;
    }
    const char *getWeakDefDirective() const {
      return WeakDefDirective;
    }
    const char *getHiddenDirective() const {
      return HiddenDirective;
    }
    const char *getProtectedDirective() const {
      return ProtectedDirective;
    }
    bool isAbsoluteDebugSectionOffsets() const {
      return AbsoluteDebugSectionOffsets;
    }
    bool isAbsoluteEHSectionOffsets() const {
      return AbsoluteEHSectionOffsets;
    }
    bool hasLEB128() const {
      return HasLEB128;
    }
    bool hasDotLocAndDotFile() const {
      return HasDotLocAndDotFile;
    }
    bool doesSupportDebugInformation() const {
      return SupportsDebugInformation;
    }
    bool doesSupportExceptionHandling() const {
      return SupportsExceptionHandling;
    }
    bool doesDwarfRequireFrameSection() const {
      return DwarfRequiresFrameSection;
    }
    bool doesDwarfUsesInlineInfoSection() const {
      return DwarfUsesInlineInfoSection;
    }
    bool doesSupportMacInfoSection() const {
      return SupportsMacInfoSection;
    }
    bool doesRequireNonLocalEHFrameLabel() const {
      return NonLocalEHFrameLabel;
    }
    const char *getGlobalEHDirective() const {
      return GlobalEHDirective;
    }
    bool getSupportsWeakOmittedEHFrame() const {
      return SupportsWeakOmittedEHFrame;
    }
    const char *getDwarfSectionOffsetDirective() const {
      return DwarfSectionOffsetDirective;
    }
    const char *getDwarfAbbrevSection() const {
      return DwarfAbbrevSection;
    }
    const char *getDwarfInfoSection() const {
      return DwarfInfoSection;
    }
    const char *getDwarfLineSection() const {
      return DwarfLineSection;
    }
    const char *getDwarfFrameSection() const {
      return DwarfFrameSection;
    }
    const char *getDwarfPubNamesSection() const {
      return DwarfPubNamesSection;
    }
    const char *getDwarfPubTypesSection() const {
      return DwarfPubTypesSection;
    }
    const char *getDwarfDebugInlineSection() const {
      return DwarfDebugInlineSection;
    }
    const char *getDwarfStrSection() const {
      return DwarfStrSection;
    }
    const char *getDwarfLocSection() const {
      return DwarfLocSection;
    }
    const char *getDwarfARangesSection() const {
      return DwarfARangesSection;
    }
    const char *getDwarfRangesSection() const {
      return DwarfRangesSection;
    }
    const char *getDwarfMacInfoSection() const {
      return DwarfMacInfoSection;
    }
    const char *getDwarfEHFrameSection() const {
      return DwarfEHFrameSection;
    }
    const char *getDwarfExceptionSection() const {
      return DwarfExceptionSection;
    }
    const char *const *getAsmCBE() const {
      return AsmTransCBE;
    }
  };
} // end namespace llvm

#endif
diff --git a/include/llvm/Target/TargetCallingConv.td b/include/llvm/Target/TargetCallingConv.td
new file mode 100644
index 0000000000000..224c08e176c79
--- /dev/null
+++ 
//===- TargetCallingConv.td - Target Calling Conventions ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces with which targets
// describe their calling conventions.
//
//===----------------------------------------------------------------------===//

class CCAction;
class CallingConv;

/// CCCustom - Calls a custom arg handling function.
class CCCustom<string fn> : CCAction {
  string FuncName = fn;
}

/// CCPredicateAction - Instances of this class check some predicate, then
/// delegate to another action if the predicate is true.
class CCPredicateAction<CCAction A> : CCAction {
  CCAction SubAction = A;
}

/// CCIfType - If the current argument is one of the specified types, apply
/// Action A.
class CCIfType<list<ValueType> vts, CCAction A> : CCPredicateAction<A> {
  list<ValueType> VTs = vts;
}

/// CCIf - If the predicate matches, apply A.
class CCIf<string predicate, CCAction A> : CCPredicateAction<A> {
  string Predicate = predicate;
}

/// CCIfByVal - If the current argument has ByVal parameter attribute, apply
/// Action A.
class CCIfByVal<CCAction A> : CCIf<"ArgFlags.isByVal()", A> {
}

/// CCIfCC - Match if the current calling convention is 'CC'.
class CCIfCC<string CC, CCAction A>
  : CCIf<!strconcat("State.getCallingConv() == ", CC), A> {}

/// CCIfInReg - If this argument is marked with the 'inreg' attribute, apply
/// the specified action.
class CCIfInReg<CCAction A> : CCIf<"ArgFlags.isInReg()", A> {}

/// CCIfNest - If this argument is marked with the 'nest' attribute, apply
/// the specified action.
class CCIfNest<CCAction A> : CCIf<"ArgFlags.isNest()", A> {}

/// CCIfNotVarArg - If the current function is not vararg - apply the action.
class CCIfNotVarArg<CCAction A> : CCIf<"!State.isVarArg()", A> {}

/// CCAssignToReg - This action matches if there is a register in the specified
/// list that is still available.  If so, it assigns the value to the first
/// available register and succeeds.
class CCAssignToReg<list<Register> regList> : CCAction {
  list<Register> RegList = regList;
}

/// CCAssignToRegWithShadow - Same as CCAssignToReg, but with list of registers
/// which became shadowed, when some register is used.
class CCAssignToRegWithShadow<list<Register> regList,
                              list<Register> shadowList> : CCAction {
  list<Register> RegList = regList;
  list<Register> ShadowRegList = shadowList;
}

/// CCAssignToStack - This action always matches: it assigns the value to a
/// stack slot of the specified size and alignment on the stack.  If size is
/// zero then the ABI size is used; if align is zero then the ABI alignment
/// is used - these may depend on the target or subtarget.
class CCAssignToStack<int size, int align> : CCAction {
  int Size = size;
  int Align = align;
}

/// CCPassByVal - This action always matches: it assigns the value to a stack
/// slot to implement ByVal aggregate parameter passing.  Size and alignment
/// specify the minimum size and alignment for the stack slot.
class CCPassByVal<int size, int align> : CCAction {
  int Size = size;
  int Align = align;
}

/// CCPromoteToType - If applied, this promotes the specified current value to
/// the specified type.
class CCPromoteToType<ValueType destTy> : CCAction {
  ValueType DestTy = destTy;
}

/// CCBitConvertToType - If applied, this bitconverts the specified current
/// value to the specified type.
class CCBitConvertToType<ValueType destTy> : CCAction {
  ValueType DestTy = destTy;
}

/// CCDelegateTo - This action invokes the specified sub-calling-convention.
/// It is successful if the specified CC matches.
class CCDelegateTo<CallingConv cc> : CCAction {
  CallingConv CC = cc;
}

/// CallingConv - An instance of this is used to define each calling convention
/// that the target supports.
class CallingConv<list<CCAction> actions> {
  list<CCAction> Actions = actions;
}

//===-- llvm/Target/TargetData.h - Data size & alignment info ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines target properties related to datatype size/offset/alignment
// information.  It uses lazy annotations to cache information about how
// structure types are laid out and used.
//
// This structure should be created once, filled in if the defaults are not
// correct and then passed around by const&.  None of the member functions
// require modification to the object.
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETDATA_H +#define LLVM_TARGET_TARGETDATA_H + +#include "llvm/Pass.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/ADT/SmallVector.h" +#include <string> + +namespace llvm { + +class Value; +class Type; +class IntegerType; +class StructType; +class StructLayout; +class GlobalVariable; + +/// Enum used to categorize the alignment types stored by TargetAlignElem +enum AlignTypeEnum { + INTEGER_ALIGN = 'i', ///< Integer type alignment + VECTOR_ALIGN = 'v', ///< Vector type alignment + FLOAT_ALIGN = 'f', ///< Floating point type alignment + AGGREGATE_ALIGN = 'a', ///< Aggregate alignment + STACK_ALIGN = 's' ///< Stack objects alignment +}; +/// Target alignment element. +/// +/// Stores the alignment data associated with a given alignment type (pointer, +/// integer, vector, float) and type bit width. +/// +/// @note The unusual order of elements in the structure attempts to reduce +/// padding and make the structure slightly more cache friendly. +struct TargetAlignElem { + AlignTypeEnum AlignType : 8; //< Alignment type (AlignTypeEnum) + unsigned char ABIAlign; //< ABI alignment for this type/bitw + unsigned char PrefAlign; //< Pref. alignment for this type/bitw + uint32_t TypeBitWidth; //< Type bit width + + /// Initializer + static TargetAlignElem get(AlignTypeEnum align_type, unsigned char abi_align, + unsigned char pref_align, uint32_t bit_width); + /// Equality predicate + bool operator==(const TargetAlignElem &rhs) const; + /// output stream operator + std::ostream &dump(std::ostream &os) const; +}; + +class TargetData : public ImmutablePass { +private: + bool LittleEndian; ///< Defaults to false + unsigned char PointerMemSize; ///< Pointer size in bytes + unsigned char PointerABIAlign; ///< Pointer ABI alignment + unsigned char PointerPrefAlign; ///< Pointer preferred alignment + + //! Where the primitive type alignment data is stored. + /*! 
+ @sa init(). + @note Could support multiple size pointer alignments, e.g., 32-bit pointers + vs. 64-bit pointers by extending TargetAlignment, but for now, we don't. + */ + SmallVector<TargetAlignElem, 16> Alignments; + //! Alignment iterator shorthand + typedef SmallVector<TargetAlignElem, 16>::iterator align_iterator; + //! Constant alignment iterator shorthand + typedef SmallVector<TargetAlignElem, 16>::const_iterator align_const_iterator; + //! Invalid alignment. + /*! + This member is a signal that a requested alignment type and bit width were + not found in the SmallVector. + */ + static const TargetAlignElem InvalidAlignmentElem; + + //! Set/initialize target alignments + void setAlignment(AlignTypeEnum align_type, unsigned char abi_align, + unsigned char pref_align, uint32_t bit_width); + unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width, + bool ABIAlign, const Type *Ty) const; + //! Internal helper method that returns requested alignment for type. + unsigned char getAlignment(const Type *Ty, bool abi_or_pref) const; + + /// Valid alignment predicate. + /// + /// Predicate that tests a TargetAlignElem reference returned by get() against + /// InvalidAlignmentElem. + inline bool validAlignment(const TargetAlignElem &align) const { + return (&align != &InvalidAlignmentElem); + } + +public: + /// Default ctor. + /// + /// @note This has to exist, because this is a pass, but it should never be + /// used. + TargetData() : ImmutablePass(&ID) { + assert(0 && "ERROR: Bad TargetData ctor used. " + "Tool did not specify a TargetData to use?"); + abort(); + } + + /// Constructs a TargetData from a specification string. See init(). + explicit TargetData(const std::string &TargetDescription) + : ImmutablePass(&ID) { + init(TargetDescription); + } + + /// Initialize target data from properties stored in the module. 
+ explicit TargetData(const Module *M); + + TargetData(const TargetData &TD) : + ImmutablePass(&ID), + LittleEndian(TD.isLittleEndian()), + PointerMemSize(TD.PointerMemSize), + PointerABIAlign(TD.PointerABIAlign), + PointerPrefAlign(TD.PointerPrefAlign), + Alignments(TD.Alignments) + { } + + ~TargetData(); // Not virtual, do not subclass this class + + //! Parse a target data layout string and initialize TargetData alignments. + void init(const std::string &TargetDescription); + + /// Target endianness... + bool isLittleEndian() const { return LittleEndian; } + bool isBigEndian() const { return !LittleEndian; } + + /// getStringRepresentation - Return the string representation of the + /// TargetData. This representation is in the same format accepted by the + /// string constructor above. + std::string getStringRepresentation() const; + /// Target pointer alignment + unsigned char getPointerABIAlignment() const { return PointerABIAlign; } + /// Return target's alignment for stack-based pointers + unsigned char getPointerPrefAlignment() const { return PointerPrefAlign; } + /// Target pointer size + unsigned char getPointerSize() const { return PointerMemSize; } + /// Target pointer size, in bits + unsigned char getPointerSizeInBits() const { return 8*PointerMemSize; } + + /// Size examples: + /// + /// Type SizeInBits StoreSizeInBits AllocSizeInBits[*] + /// ---- ---------- --------------- --------------- + /// i1 1 8 8 + /// i8 8 8 8 + /// i19 19 24 32 + /// i32 32 32 32 + /// i100 100 104 128 + /// i128 128 128 128 + /// Float 32 32 32 + /// Double 64 64 64 + /// X86_FP80 80 80 96 + /// + /// [*] The alloc size depends on the alignment, and thus on the target. + /// These values are for x86-32 linux. + + /// getTypeSizeInBits - Return the number of bits necessary to hold the + /// specified type. For example, returns 36 for i36 and 80 for x86_fp80. 
+ uint64_t getTypeSizeInBits(const Type* Ty) const; + + /// getTypeStoreSize - Return the maximum number of bytes that may be + /// overwritten by storing the specified type. For example, returns 5 + /// for i36 and 10 for x86_fp80. + uint64_t getTypeStoreSize(const Type *Ty) const { + return (getTypeSizeInBits(Ty)+7)/8; + } + + /// getTypeStoreSizeInBits - Return the maximum number of bits that may be + /// overwritten by storing the specified type; always a multiple of 8. For + /// example, returns 40 for i36 and 80 for x86_fp80. + uint64_t getTypeStoreSizeInBits(const Type *Ty) const { + return 8*getTypeStoreSize(Ty); + } + + /// getTypeAllocSize - Return the offset in bytes between successive objects + /// of the specified type, including alignment padding. This is the amount + /// that alloca reserves for this type. For example, returns 12 or 16 for + /// x86_fp80, depending on alignment. + uint64_t getTypeAllocSize(const Type* Ty) const { + // Round up to the next alignment boundary. + return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty)); + } + + /// getTypeAllocSizeInBits - Return the offset in bits between successive + /// objects of the specified type, including alignment padding; always a + /// multiple of 8. This is the amount that alloca reserves for this type. + /// For example, returns 96 or 128 for x86_fp80, depending on alignment. + uint64_t getTypeAllocSizeInBits(const Type* Ty) const { + return 8*getTypeAllocSize(Ty); + } + + /// getABITypeAlignment - Return the minimum ABI-required alignment for the + /// specified type. + unsigned char getABITypeAlignment(const Type *Ty) const; + + /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment + /// for the specified type when it is part of a call frame. + unsigned char getCallFrameTypeAlignment(const Type *Ty) const; + + + /// getPrefTypeAlignment - Return the preferred stack/global alignment for + /// the specified type. 
This is always at least as good as the ABI alignment. + unsigned char getPrefTypeAlignment(const Type *Ty) const; + + /// getPreferredTypeAlignmentShift - Return the preferred alignment for the + /// specified type, returned as log2 of the value (a shift amount). + /// + unsigned char getPreferredTypeAlignmentShift(const Type *Ty) const; + + /// getIntPtrType - Return an unsigned integer type that is the same size or + /// greater to the host pointer size. + /// + const IntegerType *getIntPtrType() const; + + /// getIndexedOffset - return the offset from the beginning of the type for + /// the specified indices. This is used to implement getelementptr. + /// + uint64_t getIndexedOffset(const Type *Ty, + Value* const* Indices, unsigned NumIndices) const; + + /// getStructLayout - Return a StructLayout object, indicating the alignment + /// of the struct, its size, and the offsets of its fields. Note that this + /// information is lazily cached. + const StructLayout *getStructLayout(const StructType *Ty) const; + + /// InvalidateStructLayoutInfo - TargetData speculatively caches StructLayout + /// objects. If a TargetData object is alive when types are being refined and + /// removed, this method must be called whenever a StructType is removed to + /// avoid a dangling pointer in this cache. + void InvalidateStructLayoutInfo(const StructType *Ty) const; + + /// getPreferredAlignment - Return the preferred alignment of the specified + /// global. This includes an explicitly requested alignment (if the global + /// has one). + unsigned getPreferredAlignment(const GlobalVariable *GV) const; + + /// getPreferredAlignmentLog - Return the preferred alignment of the + /// specified global, returned in log form. This includes an explicitly + /// requested alignment (if the global has one). + unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const; + + /// RoundUpAlignment - Round the specified value up to the next alignment + /// boundary specified by Alignment. 
For example, 7 rounded up to an + /// alignment boundary of 4 is 8. 8 rounded up to the alignment boundary of 4 + /// is 8 because it is already aligned. + template <typename UIntTy> + static UIntTy RoundUpAlignment(UIntTy Val, unsigned Alignment) { + assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!"); + return (Val + (Alignment-1)) & ~UIntTy(Alignment-1); + } + + static char ID; // Pass identification, replacement for typeid +}; + +/// StructLayout - used to lazily calculate structure layout information for a +/// target machine, based on the TargetData structure. +/// +class StructLayout { + uint64_t StructSize; + unsigned StructAlignment; + unsigned NumElements; + uint64_t MemberOffsets[1]; // variable sized array! +public: + + uint64_t getSizeInBytes() const { + return StructSize; + } + + uint64_t getSizeInBits() const { + return 8*StructSize; + } + + unsigned getAlignment() const { + return StructAlignment; + } + + /// getElementContainingOffset - Given a valid byte offset into the structure, + /// return the structure index that contains it. 
+ /// + unsigned getElementContainingOffset(uint64_t Offset) const; + + uint64_t getElementOffset(unsigned Idx) const { + assert(Idx < NumElements && "Invalid element idx!"); + return MemberOffsets[Idx]; + } + + uint64_t getElementOffsetInBits(unsigned Idx) const { + return getElementOffset(Idx)*8; + } + +private: + friend class TargetData; // Only TargetData can create this class + StructLayout(const StructType *ST, const TargetData &TD); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetELFWriterInfo.h b/include/llvm/Target/TargetELFWriterInfo.h new file mode 100644 index 0000000000000..548cc077a9462 --- /dev/null +++ b/include/llvm/Target/TargetELFWriterInfo.h @@ -0,0 +1,41 @@ +//===-- llvm/Target/TargetELFWriterInfo.h - ELF Writer Info -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TargetELFWriterInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETELFWRITERINFO_H +#define LLVM_TARGET_TARGETELFWRITERINFO_H + +namespace llvm { + + //===--------------------------------------------------------------------===// + // TargetELFWriterInfo + //===--------------------------------------------------------------------===// + + class TargetELFWriterInfo { + // EMachine - This field is the target specific value to emit as the + // e_machine member of the ELF header. 
+ unsigned short EMachine; + public: + enum MachineType { + NoMachine, + EM_386 = 3 + }; + + explicit TargetELFWriterInfo(MachineType machine) : EMachine(machine) {} + virtual ~TargetELFWriterInfo() {} + + unsigned short getEMachine() const { return EMachine; } + }; + +} // end llvm namespace + +#endif // LLVM_TARGET_TARGETELFWRITERINFO_H diff --git a/include/llvm/Target/TargetFrameInfo.h b/include/llvm/Target/TargetFrameInfo.h new file mode 100644 index 0000000000000..3e26b9dd01bea --- /dev/null +++ b/include/llvm/Target/TargetFrameInfo.h @@ -0,0 +1,80 @@ +//===-- llvm/Target/TargetFrameInfo.h ---------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Interface to describe the layout of a stack frame on the target machine. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETFRAMEINFO_H +#define LLVM_TARGET_TARGETFRAMEINFO_H + +#include <utility> + +namespace llvm { + +/// Information about stack frame layout on the target. It holds the direction +/// of stack growth, the known stack alignment on entry to each function, and +/// the offset to the locals area. +/// +/// The offset to the local area is the offset from the stack pointer on +/// function entry to the first location where function data (local variables, +/// spill locations) can be stored. 
+class TargetFrameInfo { +public: + enum StackDirection { + StackGrowsUp, // Adding to the stack increases the stack address + StackGrowsDown // Adding to the stack decreases the stack address + }; +private: + StackDirection StackDir; + unsigned StackAlignment; + int LocalAreaOffset; +public: + TargetFrameInfo(StackDirection D, unsigned StackAl, int LAO) + : StackDir(D), StackAlignment(StackAl), LocalAreaOffset(LAO) {} + + virtual ~TargetFrameInfo(); + + // These methods return information that describes the abstract stack layout + // of the target machine. + + /// getStackGrowthDirection - Return the direction the stack grows + /// + StackDirection getStackGrowthDirection() const { return StackDir; } + + /// getStackAlignment - This method returns the number of bytes that the stack + /// pointer must be aligned to. Typically, this is the largest alignment for + /// any data object in the target. + /// + unsigned getStackAlignment() const { return StackAlignment; } + + /// getOffsetOfLocalArea - This method returns the offset of the local area + /// from the stack pointer on entrance to a function. + /// + int getOffsetOfLocalArea() const { return LocalAreaOffset; } + + /// getCalleeSavedSpillSlots - This method returns a pointer to an array of + /// pairs, that contains an entry for each callee saved register that must be + /// spilled to a particular stack location if it is spilled. + /// + /// Each entry in this array contains a <register,offset> pair, indicating the + /// fixed offset from the incoming stack pointer that each register should be + /// spilled at. If a register is not listed here, the code generator is + /// allowed to spill it anywhere it chooses. 
+ /// + virtual const std::pair<unsigned, int> * + getCalleeSavedSpillSlots(unsigned &NumEntries) const { + NumEntries = 0; + return 0; + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetInstrDesc.h b/include/llvm/Target/TargetInstrDesc.h new file mode 100644 index 0000000000000..622a216c33c63 --- /dev/null +++ b/include/llvm/Target/TargetInstrDesc.h @@ -0,0 +1,435 @@ +//===-- llvm/Target/TargetInstrDesc.h - Instruction Descriptors -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TargetOperandInfo and TargetInstrDesc classes, which +// are used to describe target instructions and their operands. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETINSTRDESC_H +#define LLVM_TARGET_TARGETINSTRDESC_H + +namespace llvm { + +class TargetRegisterClass; + +//===----------------------------------------------------------------------===// +// Machine Operand Flags and Description +//===----------------------------------------------------------------------===// + +namespace TOI { + // Operand constraints: only "tied_to" for now. + enum OperandConstraint { + TIED_TO = 0 // Must be allocated the same register as. + }; + + /// OperandFlags - These are flags set on operands, but should be considered + /// private, all access should go through the TargetOperandInfo accessors. + /// See the accessors for a description of what these are. + enum OperandFlags { + LookupPtrRegClass = 0, + Predicate, + OptionalDef + }; +} + +/// TargetOperandInfo - This holds information about one operand of a machine +/// instruction, indicating the register class for register operands, etc. 
+/// +class TargetOperandInfo { +public: + /// RegClass - This specifies the register class enumeration of the operand + /// if the operand is a register. If not, this contains 0. + unsigned short RegClass; + unsigned short Flags; + /// Lower 16 bits are used to specify which constraints are set. The higher 16 + /// bits are used to specify the value of constraints (4 bits each). + unsigned int Constraints; + /// Currently no other information. + + /// isLookupPtrRegClass - Set if this operand is a pointer value and it + /// requires a callback to look up its register class. + bool isLookupPtrRegClass() const { return Flags&(1 <<TOI::LookupPtrRegClass);} + + /// isPredicate - Set if this is one of the operands that made up of + /// the predicate operand that controls an isPredicable() instruction. + bool isPredicate() const { return Flags & (1 << TOI::Predicate); } + + /// isOptionalDef - Set if this operand is a optional def. + /// + bool isOptionalDef() const { return Flags & (1 << TOI::OptionalDef); } +}; + + +//===----------------------------------------------------------------------===// +// Machine Instruction Flags and Description +//===----------------------------------------------------------------------===// + +/// TargetInstrDesc flags - These should be considered private to the +/// implementation of the TargetInstrDesc class. Clients should use the +/// predicate methods on TargetInstrDesc, not use these directly. These +/// all correspond to bitfields in the TargetInstrDesc::Flags field. +namespace TID { + enum { + Variadic = 0, + HasOptionalDef, + Return, + Call, + Barrier, + Terminator, + Branch, + IndirectBranch, + Predicable, + NotDuplicable, + DelaySlot, + FoldableAsLoad, + MayLoad, + MayStore, + UnmodeledSideEffects, + Commutable, + ConvertibleTo3Addr, + UsesCustomDAGSchedInserter, + Rematerializable, + CheapAsAMove + }; +} + +/// TargetInstrDesc - Describe properties that are true of each +/// instruction in the target description file. 
This captures information about +/// side effects, register use and many other things. There is one instance of +/// this struct for each target instruction class, and the MachineInstr class +/// points to this struct directly to describe itself. +class TargetInstrDesc { +public: + unsigned short Opcode; // The opcode number + unsigned short NumOperands; // Num of args (may be more if variable_ops) + unsigned short NumDefs; // Num of args that are definitions + unsigned short SchedClass; // enum identifying instr sched class + const char * Name; // Name of the instruction record in td file + unsigned Flags; // Flags identifying machine instr class + unsigned TSFlags; // Target Specific Flag values + const unsigned *ImplicitUses; // Registers implicitly read by this instr + const unsigned *ImplicitDefs; // Registers implicitly defined by this instr + const TargetRegisterClass **RCBarriers; // Reg classes completely "clobbered" + const TargetOperandInfo *OpInfo; // 'NumOperands' entries about operands + + /// getOperandConstraint - Returns the value of the specific constraint if + /// it is set. Returns -1 if it is not set. + int getOperandConstraint(unsigned OpNum, + TOI::OperandConstraint Constraint) const { + if (OpNum < NumOperands && + (OpInfo[OpNum].Constraints & (1 << Constraint))) { + unsigned Pos = 16 + Constraint * 4; + return (int)(OpInfo[OpNum].Constraints >> Pos) & 0xf; + } + return -1; + } + + /// getOpcode - Return the opcode number for this descriptor. + unsigned getOpcode() const { + return Opcode; + } + + /// getName - Return the name of the record in the .td file for this + /// instruction, for example "ADD8ri". + const char *getName() const { + return Name; + } + + /// getNumOperands - Return the number of declared MachineOperands for this + /// MachineInstruction. 
Note that variadic (isVariadic() returns true) + /// instructions may have additional operands at the end of the list, and note + /// that the machine instruction may include implicit register def/uses as + /// well. + unsigned getNumOperands() const { + return NumOperands; + } + + /// getNumDefs - Return the number of MachineOperands that are register + /// definitions. Register definitions always occur at the start of the + /// machine operand list. This is the number of "outs" in the .td file, + /// and does not include implicit defs. + unsigned getNumDefs() const { + return NumDefs; + } + + /// isVariadic - Return true if this instruction can have a variable number of + /// operands. In this case, the variable operands will be after the normal + /// operands but before the implicit definitions and uses (if any are + /// present). + bool isVariadic() const { + return Flags & (1 << TID::Variadic); + } + + /// hasOptionalDef - Set if this instruction has an optional definition, e.g. + /// ARM instructions which can set condition code if 's' bit is set. + bool hasOptionalDef() const { + return Flags & (1 << TID::HasOptionalDef); + } + + /// getImplicitUses - Return a list of registers that are potentially + /// read by any instance of this machine instruction. For example, on X86, + /// the "adc" instruction adds two register operands and adds the carry bit in + /// from the flags register. In this case, the instruction is marked as + /// implicitly reading the flags. Likewise, the variable shift instruction on + /// X86 is marked as implicitly reading the 'CL' register, which it always + /// does. + /// + /// This method returns null if the instruction has no implicit uses. + const unsigned *getImplicitUses() const { + return ImplicitUses; + } + + /// getImplicitDefs - Return a list of registers that are potentially + /// written by any instance of this machine instruction. For example, on X86, + /// many instructions implicitly set the flags register. 
In this case, they + /// are marked as setting the FLAGS. Likewise, many instructions always + /// deposit their result in a physical register. For example, the X86 divide + /// instruction always deposits the quotient and remainder in the EAX/EDX + /// registers. For that instruction, this will return a list containing the + /// EAX/EDX/EFLAGS registers. + /// + /// This method returns null if the instruction has no implicit defs. + const unsigned *getImplicitDefs() const { + return ImplicitDefs; + } + + /// hasImplicitUseOfPhysReg - Return true if this instruction implicitly + /// uses the specified physical register. + bool hasImplicitUseOfPhysReg(unsigned Reg) const { + if (const unsigned *ImpUses = ImplicitUses) + for (; *ImpUses; ++ImpUses) + if (*ImpUses == Reg) return true; + return false; + } + + /// hasImplicitDefOfPhysReg - Return true if this instruction implicitly + /// defines the specified physical register. + bool hasImplicitDefOfPhysReg(unsigned Reg) const { + if (const unsigned *ImpDefs = ImplicitDefs) + for (; *ImpDefs; ++ImpDefs) + if (*ImpDefs == Reg) return true; + return false; + } + + /// getRegClassBarriers - Return a list of register classes that are + /// completely clobbered by this machine instruction. For example, on X86 + /// the call instructions will completely clobber all the registers in the + /// fp stack and XMM classes. + /// + /// This method returns null if the instruction doesn't completely clobber + /// any register class. + const TargetRegisterClass **getRegClassBarriers() const { + return RCBarriers; + } + + /// getSchedClass - Return the scheduling class for this instruction. The + /// scheduling class is an index into the InstrItineraryData table. This + /// returns zero if there is no known scheduling information for the + /// instruction. 
+ ///
+ unsigned getSchedClass() const {
+ return SchedClass;
+ }
+
+ /// isReturn - Returns true if this instruction is a return instruction.
+ bool isReturn() const {
+ return Flags & (1 << TID::Return);
+ }
+
+ /// isCall - Returns true if this instruction is a call instruction.
+ bool isCall() const {
+ return Flags & (1 << TID::Call);
+ }
+
+ /// isBarrier - Returns true if the specified instruction stops control flow
+ /// from executing the instruction immediately following it. Examples include
+ /// unconditional branches and return instructions.
+ bool isBarrier() const {
+ return Flags & (1 << TID::Barrier);
+ }
+
+ /// isTerminator - Returns true if this instruction is part of the terminator
+ /// for a basic block. Typically this is things like return and branch
+ /// instructions.
+ ///
+ /// Various passes use this to insert code into the bottom of a basic block,
+ /// but before control flow occurs.
+ bool isTerminator() const {
+ return Flags & (1 << TID::Terminator);
+ }
+
+ /// isBranch - Returns true if this is a conditional, unconditional, or
+ /// indirect branch. Predicates below can be used to discriminate between
+ /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
+ /// get more information.
+ bool isBranch() const {
+ return Flags & (1 << TID::Branch);
+ }
+
+ /// isIndirectBranch - Return true if this is an indirect branch, such as a
+ /// branch through a register.
+ bool isIndirectBranch() const {
+ return Flags & (1 << TID::IndirectBranch);
+ }
+
+ /// isConditionalBranch - Return true if this is a branch which may fall
+ /// through to the next instruction or may transfer control flow to some other
+ /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
+ /// information about this branch.
+ bool isConditionalBranch() const {
+ // Use logical && (not bitwise &) on the boolean predicates.
+ return isBranch() && !isBarrier() && !isIndirectBranch();
+ }
+
+ /// isUnconditionalBranch - Return true if this is a branch which always
+ /// transfers control flow to some other block. The
+ /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
+ /// about this branch.
+ bool isUnconditionalBranch() const {
+ // Use logical && (not bitwise &) on the boolean predicates.
+ return isBranch() && isBarrier() && !isIndirectBranch();
+ }
+
+ /// isPredicable - Return true if this instruction has a predicate operand
+ /// that controls execution. It may be set to 'always', or may be set to other
+ /// values. There are various methods in TargetInstrInfo that can be used to
+ /// control and modify the predicate in this instruction.
+ bool isPredicable() const {
+ return Flags & (1 << TID::Predicable);
+ }
+
+ /// isNotDuplicable - Return true if this instruction cannot be safely
+ /// duplicated. For example, if the instruction has unique labels attached
+ /// to it, duplicating it would cause multiple definition errors.
+ bool isNotDuplicable() const {
+ return Flags & (1 << TID::NotDuplicable);
+ }
+
+ /// hasDelaySlot - Returns true if the specified instruction has a delay slot
+ /// which must be filled by the code generator.
+ bool hasDelaySlot() const {
+ return Flags & (1 << TID::DelaySlot);
+ }
+
+ /// canFoldAsLoad - Return true for instructions that can be folded as
+ /// memory operands in other instructions. The most common use for this
+ /// is instructions that are simple loads from memory that don't modify
+ /// the loaded value in any way, but it can also be used for instructions
+ /// that can be expressed as constant-pool loads, such as V_SETALLONES
+ /// on x86, to allow them to be folded when it is beneficial.
+ /// This should only be set on instructions that return a value in their
+ /// only virtual register definition.
+ bool canFoldAsLoad() const {
+ return Flags & (1 << TID::FoldableAsLoad);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Side Effect Analysis
+ //===--------------------------------------------------------------------===//
+
+ /// mayLoad - Return true if this instruction could possibly read memory.
+ /// Instructions with this flag set are not necessarily simple load + /// instructions, they may load a value and modify it, for example. + bool mayLoad() const { + return Flags & (1 << TID::MayLoad); + } + + + /// mayStore - Return true if this instruction could possibly modify memory. + /// Instructions with this flag set are not necessarily simple store + /// instructions, they may store a modified value based on their operands, or + /// may not actually modify anything, for example. + bool mayStore() const { + return Flags & (1 << TID::MayStore); + } + + /// hasUnmodeledSideEffects - Return true if this instruction has side + /// effects that are not modeled by other flags. This does not return true + /// for instructions whose effects are captured by: + /// + /// 1. Their operand list and implicit definition/use list. Register use/def + /// info is explicit for instructions. + /// 2. Memory accesses. Use mayLoad/mayStore. + /// 3. Calling, branching, returning: use isCall/isReturn/isBranch. + /// + /// Examples of side effects would be modifying 'invisible' machine state like + /// a control register, flushing a cache, modifying a register invisible to + /// LLVM, etc. + /// + bool hasUnmodeledSideEffects() const { + return Flags & (1 << TID::UnmodeledSideEffects); + } + + //===--------------------------------------------------------------------===// + // Flags that indicate whether an instruction can be modified by a method. + //===--------------------------------------------------------------------===// + + /// isCommutable - Return true if this may be a 2- or 3-address + /// instruction (of the form "X = op Y, Z, ..."), which produces the same + /// result if Y and Z are exchanged. If this flag is set, then the + /// TargetInstrInfo::commuteInstruction method may be used to hack on the + /// instruction. + /// + /// Note that this flag may be set on instructions that are only commutable + /// sometimes. 
In these cases, the call to commuteInstruction will fail. + /// Also note that some instructions require non-trivial modification to + /// commute them. + bool isCommutable() const { + return Flags & (1 << TID::Commutable); + } + + /// isConvertibleTo3Addr - Return true if this is a 2-address instruction + /// which can be changed into a 3-address instruction if needed. Doing this + /// transformation can be profitable in the register allocator, because it + /// means that the instruction can use a 2-address form if possible, but + /// degrade into a less efficient form if the source and dest register cannot + /// be assigned to the same register. For example, this allows the x86 + /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which + /// is the same speed as the shift but has bigger code size. + /// + /// If this returns true, then the target must implement the + /// TargetInstrInfo::convertToThreeAddress method for this instruction, which + /// is allowed to fail if the transformation isn't valid for this specific + /// instruction (e.g. shl reg, 4 on x86). + /// + bool isConvertibleTo3Addr() const { + return Flags & (1 << TID::ConvertibleTo3Addr); + } + + /// usesCustomDAGSchedInsertionHook - Return true if this instruction requires + /// custom insertion support when the DAG scheduler is inserting it into a + /// machine basic block. If this is true for the instruction, it basically + /// means that it is a pseudo instruction used at SelectionDAG time that is + /// expanded out into magic code by the target when MachineInstrs are formed. + /// + /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method + /// is used to insert this into the MachineBasicBlock. + bool usesCustomDAGSchedInsertionHook() const { + return Flags & (1 << TID::UsesCustomDAGSchedInserter); + } + + /// isRematerializable - Returns true if this instruction is a candidate for + /// remat. This flag is deprecated, please don't use it anymore. 
If this + /// flag is set, the isReallyTriviallyReMaterializable() method is called to + /// verify the instruction is really rematable. + bool isRematerializable() const { + return Flags & (1 << TID::Rematerializable); + } + + /// isAsCheapAsAMove - Returns true if this instruction has the same cost (or + /// less) than a move instruction. This is useful during certain types of + /// optimizations (e.g., remat during two-address conversion or machine licm) + /// where we would like to remat or hoist the instruction, but not if it costs + /// more than moving the instruction into the appropriate register. Note, we + /// are not marking copies from and to the same register class with this flag. + bool isAsCheapAsAMove() const { + return Flags & (1 << TID::CheapAsAMove); + } +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h new file mode 100644 index 0000000000000..ecdd68258d55c --- /dev/null +++ b/include/llvm/Target/TargetInstrInfo.h @@ -0,0 +1,517 @@ +//===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the target machine instruction set to the code generator. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETINSTRINFO_H +#define LLVM_TARGET_TARGETINSTRINFO_H + +#include "llvm/Target/TargetInstrDesc.h" +#include "llvm/CodeGen/MachineFunction.h" + +namespace llvm { + +class TargetRegisterClass; +class TargetRegisterInfo; +class LiveVariables; +class CalleeSavedInfo; +class SDNode; +class SelectionDAG; + +template<class T> class SmallVectorImpl; + + +//--------------------------------------------------------------------------- +/// +/// TargetInstrInfo - Interface to description of machine instruction set +/// +class TargetInstrInfo { + const TargetInstrDesc *Descriptors; // Raw array to allow static init'n + unsigned NumOpcodes; // Number of entries in the desc array + + TargetInstrInfo(const TargetInstrInfo &); // DO NOT IMPLEMENT + void operator=(const TargetInstrInfo &); // DO NOT IMPLEMENT +public: + TargetInstrInfo(const TargetInstrDesc *desc, unsigned NumOpcodes); + virtual ~TargetInstrInfo(); + + // Invariant opcodes: All instruction sets have these as their low opcodes. + enum { + PHI = 0, + INLINEASM = 1, + DBG_LABEL = 2, + EH_LABEL = 3, + GC_LABEL = 4, + DECLARE = 5, + + /// EXTRACT_SUBREG - This instruction takes two operands: a register + /// that has subregisters, and a subregister index. It returns the + /// extracted subregister value. This is commonly used to implement + /// truncation operations on target architectures which support it. + EXTRACT_SUBREG = 6, + + /// INSERT_SUBREG - This instruction takes three operands: a register + /// that has subregisters, a register providing an insert value, and a + /// subregister index. It returns the value of the first register with + /// the value of the second register inserted. The first register is + /// often defined by an IMPLICIT_DEF, as is commonly used to implement + /// anyext operations on target architectures which support it. 
+ INSERT_SUBREG = 7, + + /// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef. + IMPLICIT_DEF = 8, + + /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except + /// that the first operand is an immediate integer constant. This constant + /// is often zero, as is commonly used to implement zext operations on + /// target architectures which support it, such as with x86-64 (with + /// zext from i32 to i64 via implicit zero-extension). + SUBREG_TO_REG = 9, + + /// COPY_TO_REGCLASS - This instruction is a placeholder for a plain + /// register-to-register copy into a specific register class. This is only + /// used between instruction selection and MachineInstr creation, before + /// virtual registers have been created for all the instructions, and it's + /// only needed in cases where the register classes implied by the + /// instructions are insufficient. The actual MachineInstrs to perform + /// the copy are emitted with the TargetInstrInfo::copyRegToReg hook. + COPY_TO_REGCLASS = 10 + }; + + unsigned getNumOpcodes() const { return NumOpcodes; } + + /// get - Return the machine instruction descriptor that corresponds to the + /// specified instruction opcode. + /// + const TargetInstrDesc &get(unsigned Opcode) const { + assert(Opcode < NumOpcodes && "Invalid opcode!"); + return Descriptors[Opcode]; + } + + /// isTriviallyReMaterializable - Return true if the instruction is trivially + /// rematerializable, meaning it has no side effects and requires no operands + /// that aren't always available. + bool isTriviallyReMaterializable(const MachineInstr *MI) const { + return MI->getDesc().isRematerializable() && + isReallyTriviallyReMaterializable(MI); + } + +protected: + /// isReallyTriviallyReMaterializable - For instructions with opcodes for + /// which the M_REMATERIALIZABLE flag is set, this function tests whether the + /// instruction itself is actually trivially rematerializable, considering + /// its operands. 
This is used for targets that have instructions that are
+ /// only trivially rematerializable for specific uses. This predicate must
+ /// return false if the instruction has any side effects other than
+ /// producing a value, or if it requires any address registers that are not
+ /// always available.
+ virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI) const {
+ return true;
+ }
+
+public:
+ /// Return true if the instruction is a register to register move and return
+ /// the source and dest operands and their sub-register indices by reference.
+ virtual bool isMoveInstr(const MachineInstr& MI,
+ unsigned& SrcReg, unsigned& DstReg,
+ unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
+ return false;
+ }
+
+ /// isLoadFromStackSlot - If the specified machine instruction is a direct
+ /// load from a stack slot, return the virtual or physical register number of
+ /// the destination along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than loading from the stack slot.
+ virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ return 0;
+ }
+
+ /// isStoreToStackSlot - If the specified machine instruction is a direct
+ /// store to a stack slot, return the virtual or physical register number of
+ /// the source reg along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than storing to the stack slot.
+ virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ return 0;
+ }
+
+ /// reMaterialize - Re-issue the specified 'original' instruction at the
+ /// specific location targeting a new destination register.
+ virtual void reMaterialize(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg,
+ const MachineInstr *Orig) const = 0;
+
+ /// isInvariantLoad - Return true if the specified instruction (which is
+ /// marked mayLoad) is loading from a location whose value is invariant across
+ /// the function. For example, loading a value from the constant pool or
+ /// from the argument area of a function if it does not change. This should
+ /// only return true if *all* loads the instruction does are invariant (if it
+ /// does multiple loads).
+ virtual bool isInvariantLoad(const MachineInstr *MI) const {
+ return false;
+ }
+
+ /// convertToThreeAddress - This method must be implemented by targets that
+ /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
+ /// may be able to convert a two-address instruction into one or more true
+ /// three-address instructions on demand. This allows the X86 target (for
+ /// example) to convert ADD and SHL instructions into LEA instructions if they
+ /// would require register copies due to two-addressness.
+ ///
+ /// This method returns a null pointer if the transformation cannot be
+ /// performed, otherwise it returns the last new instruction.
+ ///
+ virtual MachineInstr *
+ convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
+ return 0;
+ }
+
+ /// commuteInstruction - If a target has any instructions that are commutable,
+ /// but require converting to a different instruction or making non-trivial
+ /// changes to commute them, this method can be overloaded to do this. The
+ /// default implementation of this method simply swaps the first two operands
+ /// of MI and returns it.
+ ///
+ /// If a target wants to make more aggressive changes, they can construct and
+ /// return a new machine instruction. If an instruction cannot commute, it
+ /// can also return null.
+ /// + /// If NewMI is true, then a new machine instruction must be created. + /// + virtual MachineInstr *commuteInstruction(MachineInstr *MI, + bool NewMI = false) const = 0; + + /// CommuteChangesDestination - Return true if commuting the specified + /// instruction will also changes the destination operand. Also return the + /// current operand index of the would be new destination register by + /// reference. This can happen when the commutable instruction is also a + /// two-address instruction. + virtual bool CommuteChangesDestination(MachineInstr *MI, + unsigned &OpIdx) const = 0; + + /// AnalyzeBranch - Analyze the branching code at the end of MBB, returning + /// true if it cannot be understood (e.g. it's a switch dispatch or isn't + /// implemented for a target). Upon success, this returns false and returns + /// with the following information in various cases: + /// + /// 1. If this block ends with no branches (it just falls through to its succ) + /// just return false, leaving TBB/FBB null. + /// 2. If this block ends with only an unconditional branch, it sets TBB to be + /// the destination block. + /// 3. If this block ends with an conditional branch and it falls through to + /// an successor block, it sets TBB to be the branch destination block and + /// a list of operands that evaluate the condition. These + /// operands can be passed to other TargetInstrInfo methods to create new + /// branches. + /// 4. If this block ends with an conditional branch and an unconditional + /// block, it returns the 'true' destination in TBB, the 'false' + /// destination in FBB, and a list of operands that evaluate the condition. + /// These operands can be passed to other TargetInstrInfo methods to create + /// new branches. + /// + /// Note that RemoveBranch and InsertBranch must be implemented to support + /// cases where this method returns success. + /// + /// If AllowModify is true, then this routine is allowed to modify the basic + /// block (e.g. 
delete instructions after the unconditional branch).
+ ///
+ virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const {
+ return true;
+ }
+
+ /// RemoveBranch - Remove the branching code at the end of the specified MBB.
+ /// This is only invoked in cases where AnalyzeBranch returns success. It
+ /// returns the number of instructions that were removed.
+ virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
+ assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!");
+ return 0;
+ }
+
+ /// InsertBranch - Insert a branch into the end of the specified
+ /// MachineBasicBlock. The operands to this method are the same as those
+ /// returned by AnalyzeBranch. This is invoked in cases where AnalyzeBranch
+ /// returns success and when an unconditional branch (TBB is non-null, FBB is
+ /// null, Cond is empty) needs to be inserted. It returns the number of
+ /// instructions inserted.
+ ///
+ /// It is also invoked by tail merging to add unconditional branches in
+ /// cases where AnalyzeBranch doesn't apply because there was no original
+ /// branch to analyze. At least this much must be implemented, else tail
+ /// merging needs to be disabled.
+ virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond) const {
+ assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
+ return 0;
+ }
+
+ /// copyRegToReg - Emit instructions to copy between a pair of registers. It
+ /// returns false if the target does not know how to copy between the
+ /// specified registers.
+ virtual bool copyRegToReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, unsigned SrcReg, + const TargetRegisterClass *DestRC, + const TargetRegisterClass *SrcRC) const { + assert(0 && "Target didn't implement TargetInstrInfo::copyRegToReg!"); + return false; + } + + /// storeRegToStackSlot - Store the specified register of the given register + /// class to the specified stack frame index. The store instruction is to be + /// added to the given machine basic block before the specified machine + /// instruction. If isKill is true, the register operand is the last use and + /// must be marked kill. + virtual void storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned SrcReg, bool isKill, int FrameIndex, + const TargetRegisterClass *RC) const { + assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!"); + } + + /// storeRegToAddr - Store the specified register of the given register class + /// to the specified address. The store instruction is to be added to the + /// given machine basic block before the specified machine instruction. If + /// isKill is true, the register operand is the last use and must be marked + /// kill. + virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill, + SmallVectorImpl<MachineOperand> &Addr, + const TargetRegisterClass *RC, + SmallVectorImpl<MachineInstr*> &NewMIs) const { + assert(0 && "Target didn't implement TargetInstrInfo::storeRegToAddr!"); + } + + /// loadRegFromStackSlot - Load the specified register of the given register + /// class from the specified stack frame index. The load instruction is to be + /// added to the given machine basic block before the specified machine + /// instruction. 
+ virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC) const { + assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!"); + } + + /// loadRegFromAddr - Load the specified register of the given register class + /// class from the specified address. The load instruction is to be added to + /// the given machine basic block before the specified machine instruction. + virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg, + SmallVectorImpl<MachineOperand> &Addr, + const TargetRegisterClass *RC, + SmallVectorImpl<MachineInstr*> &NewMIs) const { + assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromAddr!"); + } + + /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee + /// saved registers and returns true if it isn't possible / profitable to do + /// so by issuing a series of store instructions via + /// storeRegToStackSlot(). Returns false otherwise. + virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector<CalleeSavedInfo> &CSI) const { + return false; + } + + /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee + /// saved registers and returns true if it isn't possible / profitable to do + /// so by issuing a series of load instructions via loadRegToStackSlot(). + /// Returns false otherwise. + virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector<CalleeSavedInfo> &CSI) const { + return false; + } + + /// foldMemoryOperand - Attempt to fold a load or store of the specified stack + /// slot into the specified machine instruction for the specified operand(s). + /// If this is possible, a new instruction is returned with the specified + /// operand folded, otherwise NULL is returned. 
The client is responsible for + /// removing the old instruction and adding the new one in the instruction + /// stream. + MachineInstr* foldMemoryOperand(MachineFunction &MF, + MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + int FrameIndex) const; + + /// foldMemoryOperand - Same as the previous version except it allows folding + /// of any load and store from / to any address, not just from a specific + /// stack slot. + MachineInstr* foldMemoryOperand(MachineFunction &MF, + MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + MachineInstr* LoadMI) const; + +protected: + /// foldMemoryOperandImpl - Target-dependent implementation for + /// foldMemoryOperand. Target-independent code in foldMemoryOperand will + /// take care of adding a MachineMemOperand to the newly created instruction. + virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF, + MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + int FrameIndex) const { + return 0; + } + + /// foldMemoryOperandImpl - Target-dependent implementation for + /// foldMemoryOperand. Target-independent code in foldMemoryOperand will + /// take care of adding a MachineMemOperand to the newly created instruction. + virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF, + MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + MachineInstr* LoadMI) const { + return 0; + } + +public: + /// canFoldMemoryOperand - Returns true for the specified load / store if + /// folding is possible. + virtual + bool canFoldMemoryOperand(const MachineInstr *MI, + const SmallVectorImpl<unsigned> &Ops) const { + return false; + } + + /// unfoldMemoryOperand - Separate a single instruction which folded a load or + /// a store or a load and a store into two or more instruction. If this is + /// possible, returns true as well as the new instructions by reference. 
+ virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, + unsigned Reg, bool UnfoldLoad, bool UnfoldStore, + SmallVectorImpl<MachineInstr*> &NewMIs) const{ + return false; + } + + virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, + SmallVectorImpl<SDNode*> &NewNodes) const { + return false; + } + + /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new + /// instruction after load / store are unfolded from an instruction of the + /// specified opcode. It returns zero if the specified unfolding is not + /// possible. + virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, + bool UnfoldLoad, bool UnfoldStore) const { + return 0; + } + + /// BlockHasNoFallThrough - Return true if the specified block does not + /// fall-through into its successor block. This is primarily used when a + /// branch is unanalyzable. It is useful for things like unconditional + /// indirect branches (jump tables). + virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const { + return false; + } + + /// ReverseBranchCondition - Reverses the branch condition of the specified + /// condition list, returning false on success and true if it cannot be + /// reversed. + virtual + bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { + return true; + } + + /// insertNoop - Insert a noop into the instruction stream at the specified + /// point. + virtual void insertNoop(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI) const { + assert(0 && "Target didn't implement insertNoop!"); + abort(); + } + + /// isPredicated - Returns true if the instruction is already predicated. + /// + virtual bool isPredicated(const MachineInstr *MI) const { + return false; + } + + /// isUnpredicatedTerminator - Returns true if the instruction is a + /// terminator instruction that has not been predicated. 
+ virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const; + + /// PredicateInstruction - Convert the instruction into a predicated + /// instruction. It returns true if the operation was successful. + virtual + bool PredicateInstruction(MachineInstr *MI, + const SmallVectorImpl<MachineOperand> &Pred) const = 0; + + /// SubsumesPredicate - Returns true if the first specified predicate + /// subsumes the second, e.g. GE subsumes GT. + virtual + bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1, + const SmallVectorImpl<MachineOperand> &Pred2) const { + return false; + } + + /// DefinesPredicate - If the specified instruction defines any predicate + /// or condition code register(s) used for predication, returns true as well + /// as the definition predicate(s) by reference. + virtual bool DefinesPredicate(MachineInstr *MI, + std::vector<MachineOperand> &Pred) const { + return false; + } + + /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine + /// instruction that defines the specified register class. + virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { + return true; + } + + /// GetInstSize - Returns the size of the specified Instruction. + /// + virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const { + assert(0 && "Target didn't implement TargetInstrInfo::GetInstSize!"); + return 0; + } + + /// GetFunctionSizeInBytes - Returns the size of the specified MachineFunction. + /// + virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const = 0; +}; + +/// TargetInstrInfoImpl - This is the default implementation of +/// TargetInstrInfo, which just provides a couple of default implementations +/// for various methods. This separated out because it is implemented in +/// libcodegen, not in libtarget. 
+class TargetInstrInfoImpl : public TargetInstrInfo { +protected: + TargetInstrInfoImpl(const TargetInstrDesc *desc, unsigned NumOpcodes) + : TargetInstrInfo(desc, NumOpcodes) {} +public: + virtual MachineInstr *commuteInstruction(MachineInstr *MI, + bool NewMI = false) const; + virtual bool CommuteChangesDestination(MachineInstr *MI, + unsigned &OpIdx) const; + virtual bool PredicateInstruction(MachineInstr *MI, + const SmallVectorImpl<MachineOperand> &Pred) const; + virtual void reMaterialize(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, + const MachineInstr *Orig) const; + virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const; +}; + +/// getInstrOperandRegClass - Return register class of the operand of an +/// instruction of the specified TargetInstrDesc. +const TargetRegisterClass* +getInstrOperandRegClass(const TargetRegisterInfo *TRI, + const TargetInstrDesc &II, unsigned Op); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetInstrItineraries.h b/include/llvm/Target/TargetInstrItineraries.h new file mode 100644 index 0000000000000..18931ea7fb486 --- /dev/null +++ b/include/llvm/Target/TargetInstrItineraries.h @@ -0,0 +1,99 @@ +//===-- llvm/Target/TargetInstrItineraries.h - Scheduling -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the structures used for instruction itineraries and +// states. This is used by schedulers to determine instruction states and +// latencies. 
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETINSTRITINERARIES_H
#define LLVM_TARGET_TARGETINSTRITINERARIES_H

namespace llvm {

//===----------------------------------------------------------------------===//
/// InstrStage - One step in the execution of an instruction.  Cycles is the
/// number of discrete time slots needed to complete the stage; Units selects
/// the functional units (e.g. IntUnit1, IntUnit2) that can perform it.
///
struct InstrStage {
  unsigned Cycles;   ///< Length of stage in machine cycles
  unsigned Units;    ///< Choice of functional units
};


//===----------------------------------------------------------------------===//
/// InstrItinerary - The sequential series of steps required to complete one
/// class of instruction, expressed as a half-open range [First, Last) of
/// indices into a shared InstrStage array.
///
struct InstrItinerary {
  unsigned First;    ///< Index of first stage in itinerary
  unsigned Last;     ///< Index of last + 1 stage in itinerary
};


//===----------------------------------------------------------------------===//
/// InstrItineraryData - Itinerary tables supplied by a subtarget for use by
/// the target.  Holds non-owning pointers into target-provided tables.
///
struct InstrItineraryData {
  const InstrStage *Stages;               ///< Array of stages selected
  // NOTE(review): "Itineratries" is a historical misspelling of
  // "Itineraries"; the name is kept verbatim because this public member may
  // be referenced by code outside this file.
  const InstrItinerary *Itineratries;     ///< Array of itineraries selected

  /// Default constructor: an empty (no itineraries) table.
  InstrItineraryData() : Stages(0), Itineratries(0) {}

  /// Construct from target-supplied stage and itinerary tables.
  InstrItineraryData(const InstrStage *S, const InstrItinerary *I)
    : Stages(S), Itineratries(I) {}

  /// isEmpty - Returns true if there are no itineraries.
  bool isEmpty() const { return Itineratries == 0; }

  /// begin - Return the first stage of the itinerary for the given class.
  const InstrStage *begin(unsigned ItinClassIndx) const {
    return Stages + Itineratries[ItinClassIndx].First;
  }

  /// end - Return one past the last stage of the itinerary for the class.
  const InstrStage *end(unsigned ItinClassIndx) const {
    return Stages + Itineratries[ItinClassIndx].Last;
  }

  /// getLatency - Return the scheduling latency of the given class: the sum
  /// of the cycle counts of all of its stages.  A single scalar latency is
  /// an over-simplification for some architectures, but it is a reasonable
  /// first approximation.
  unsigned getLatency(unsigned ItinClassIndx) const {
    // Targets that supply no itinerary information still get a simple,
    // non-zero default latency for every instruction.
    if (isEmpty())
      return 1;

    unsigned Total = 0;
    for (const InstrStage *S = begin(ItinClassIndx),
                          *SE = end(ItinClassIndx); S != SE; ++S)
      Total += S->Cycles;
    return Total;
  }
};


} // End llvm namespace

#endif
//===-- llvm/Target/TargetIntrinsicInfo.h - Instruction Info ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the target intrinsic instructions to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
#define LLVM_TARGET_TARGETINTRINSICINFO_H

namespace llvm {

class Function;
class Module;
class Type;

//---------------------------------------------------------------------------
///
/// TargetIntrinsicInfo - Interface to description of machine instruction set.
/// The base-class defaults describe a target with no intrinsics of its own;
/// targets override the virtual hooks to expose theirs.
///
class TargetIntrinsicInfo {

  const char **Intrinsics;               // Raw array to allow static init'n
  unsigned NumIntrinsics;                // Number of entries in the desc array

  // Copying is disallowed: declared here, intentionally never defined.
  TargetIntrinsicInfo(const TargetIntrinsicInfo &);   // DO NOT IMPLEMENT
  void operator=(const TargetIntrinsicInfo &);        // DO NOT IMPLEMENT
public:
  /// Construct from a static table of intrinsic descriptions; the
  /// constructor and destructor bodies are defined out of line.
  TargetIntrinsicInfo(const char **desc, unsigned num);
  virtual ~TargetIntrinsicInfo();

  /// getNumIntrinsics - Number of entries in the description table.
  unsigned getNumIntrinsics() const { return NumIntrinsics; }

  /// getDeclaration - Return the Function declaration for intrinsic
  /// BuiltinName in module M.  The default implementation knows of no
  /// intrinsics and returns null.
  virtual Function *getDeclaration(Module *M, const char *BuiltinName) const {
    return 0;
  }

  // Returns the Function declaration for intrinsic BuiltinName. If the
  // intrinsic can be overloaded, uses Tys to return the correct function.
  // The default implementation returns null.
  virtual Function *getDeclaration(Module *M, const char *BuiltinName,
                                   const Type **Tys, unsigned numTys) const {
    return 0;
  }

  // Returns true if the Builtin can be overloaded; by default nothing can.
  virtual bool isOverloaded(Module *M, const char *BuiltinName) const {
    return false;
  }

  /// getIntrinsicID - Map F to the target's intrinsic number.  The default
  /// returns 0 (presumably "not an intrinsic" -- confirm with callers).
  virtual unsigned getIntrinsicID(Function *F) const { return 0; }
};

} // End llvm namespace

#endif
//===- Target/TargetJITInfo.h - Target Information for JIT ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// +// +// This file exposes an abstract interface used by the Just-In-Time code +// generator to perform target-specific activities, such as emitting stubs. If +// a TargetMachine supports JIT code generation, it should provide one of these +// objects through the getJITInfo() method. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETJITINFO_H +#define LLVM_TARGET_TARGETJITINFO_H + +#include <cassert> +#include "llvm/Support/DataTypes.h" + +namespace llvm { + class Function; + class GlobalValue; + class JITCodeEmitter; + class MachineRelocation; + + /// TargetJITInfo - Target specific information required by the Just-In-Time + /// code generator. + class TargetJITInfo { + public: + virtual ~TargetJITInfo() {} + + /// replaceMachineCodeForFunction - Make it so that calling the function + /// whose machine code is at OLD turns into a call to NEW, perhaps by + /// overwriting OLD with a branch to NEW. This is used for self-modifying + /// code. + /// + virtual void replaceMachineCodeForFunction(void *Old, void *New) = 0; + + /// emitGlobalValueIndirectSym - Use the specified JITCodeEmitter object + /// to emit an indirect symbol which contains the address of the specified + /// ptr. + virtual void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr, + JITCodeEmitter &JCE) { + assert(0 && "This target doesn't implement emitGlobalValueIndirectSym!"); + return 0; + } + + /// emitFunctionStub - Use the specified JITCodeEmitter object to emit a + /// small native function that simply calls the function at the specified + /// address. Return the address of the resultant function. 
+ virtual void *emitFunctionStub(const Function* F, void *Fn, + JITCodeEmitter &JCE) { + assert(0 && "This target doesn't implement emitFunctionStub!"); + return 0; + } + + /// emitFunctionStubAtAddr - Use the specified JITCodeEmitter object to + /// emit a small native function that simply calls Fn. Emit the stub into + /// the supplied buffer. + virtual void emitFunctionStubAtAddr(const Function* F, void *Fn, + void *Buffer, JITCodeEmitter &JCE) { + assert(0 && "This target doesn't implement emitFunctionStubAtAddr!"); + } + + /// getPICJumpTableEntry - Returns the value of the jumptable entry for the + /// specific basic block. + virtual uintptr_t getPICJumpTableEntry(uintptr_t BB, uintptr_t JTBase) { + assert(0 && "This target doesn't implement getPICJumpTableEntry!"); + return 0; + } + + /// LazyResolverFn - This typedef is used to represent the function that + /// unresolved call points should invoke. This is a target specific + /// function that knows how to walk the stack and find out which stub the + /// call is coming from. + typedef void (*LazyResolverFn)(); + + /// JITCompilerFn - This typedef is used to represent the JIT function that + /// lazily compiles the function corresponding to a stub. The JIT keeps + /// track of the mapping between stubs and LLVM Functions, the target + /// provides the ability to figure out the address of a stub that is called + /// by the LazyResolverFn. + typedef void* (*JITCompilerFn)(void *); + + /// getLazyResolverFunction - This method is used to initialize the JIT, + /// giving the target the function that should be used to compile a + /// function, and giving the JIT the target function used to do the lazy + /// resolving. 
+ virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn) { + assert(0 && "Not implemented for this target!"); + return 0; + } + + /// relocate - Before the JIT can run a block of code that has been emitted, + /// it must rewrite the code to contain the actual addresses of any + /// referenced global symbols. + virtual void relocate(void *Function, MachineRelocation *MR, + unsigned NumRelocs, unsigned char* GOTBase) { + assert(NumRelocs == 0 && "This target does not have relocations!"); + } + + + /// allocateThreadLocalMemory - Each target has its own way of + /// handling thread local variables. This method returns a value only + /// meaningful to the target. + virtual char* allocateThreadLocalMemory(size_t size) { + assert(0 && "This target does not implement thread local storage!"); + return 0; + } + + /// needsGOT - Allows a target to specify that it would like the + /// JIT to manage a GOT for it. + bool needsGOT() const { return useGOT; } + + /// hasCustomConstantPool - Allows a target to specify that constant + /// pool address resolution is handled by the target. + virtual bool hasCustomConstantPool() const { return false; } + + /// hasCustomJumpTables - Allows a target to specify that jumptables + /// are emitted by the target. + virtual bool hasCustomJumpTables() const { return false; } + + /// allocateSeparateGVMemory - If true, globals should be placed in + /// separately allocated heap memory rather than in the same + /// code memory allocated by JITCodeEmitter. 
+ virtual bool allocateSeparateGVMemory() const { return false; } + protected: + bool useGOT; + }; +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h new file mode 100644 index 0000000000000..163f4c5ae50eb --- /dev/null +++ b/include/llvm/Target/TargetLowering.h @@ -0,0 +1,1676 @@ +//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes how to lower LLVM code to machine code. This has two +// main components: +// +// 1. Which ValueTypes are natively supported by the target. +// 2. Which operations are supported for supported ValueTypes. +// 3. Cost thresholds for alternative implementations of certain operations. +// +// In addition it has a few other components, like information about FP +// immediates. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETLOWERING_H +#define LLVM_TARGET_TARGETLOWERING_H + +#include "llvm/InlineAsm.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" +#include "llvm/CodeGen/RuntimeLibcalls.h" +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/DebugLoc.h" +#include "llvm/Target/TargetMachine.h" +#include <climits> +#include <map> +#include <vector> + +namespace llvm { + class AllocaInst; + class CallInst; + class Function; + class FastISel; + class MachineBasicBlock; + class MachineFunction; + class MachineFrameInfo; + class MachineInstr; + class MachineModuleInfo; + class DwarfWriter; + class SDNode; + class SDValue; + class SelectionDAG; + class TargetData; + class TargetMachine; + class TargetRegisterClass; + class TargetSubtarget; + class Value; + + // FIXME: should this be here? + namespace TLSModel { + enum Model { + GeneralDynamic, + LocalDynamic, + InitialExec, + LocalExec + }; + } + TLSModel::Model getTLSModel(const GlobalValue *GV, Reloc::Model reloc); + + +//===----------------------------------------------------------------------===// +/// TargetLowering - This class defines information used to lower LLVM code to +/// legal SelectionDAG operators that the target instruction selector can accept +/// natively. +/// +/// This class also defines callbacks that targets must implement to lower +/// target-specific constructs to SelectionDAG operators. +/// +class TargetLowering { +public: + /// LegalizeAction - This enum indicates whether operations are valid for a + /// target, and if not, what action should be used to make them valid. + enum LegalizeAction { + Legal, // The target natively supports this operation. + Promote, // This operation should be executed in a larger type. 
+ Expand, // Try to expand this to other ops, otherwise use a libcall. + Custom // Use the LowerOperation hook to implement custom lowering. + }; + + enum OutOfRangeShiftAmount { + Undefined, // Oversized shift amounts are undefined (default). + Mask, // Shift amounts are auto masked (anded) to value size. + Extend // Oversized shift pulls in zeros or sign bits. + }; + + enum BooleanContent { // How the target represents true/false values. + UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage. + ZeroOrOneBooleanContent, // All bits zero except for bit 0. + ZeroOrNegativeOneBooleanContent // All bits equal to bit 0. + }; + + enum SchedPreference { + SchedulingForLatency, // Scheduling for shortest total latency. + SchedulingForRegPressure // Scheduling for lowest register pressure. + }; + + explicit TargetLowering(TargetMachine &TM); + virtual ~TargetLowering(); + + TargetMachine &getTargetMachine() const { return TM; } + const TargetData *getTargetData() const { return TD; } + + bool isBigEndian() const { return !IsLittleEndian; } + bool isLittleEndian() const { return IsLittleEndian; } + MVT getPointerTy() const { return PointerTy; } + MVT getShiftAmountTy() const { return ShiftAmountTy; } + OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; } + + /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC + /// codegen. + bool usesGlobalOffsetTable() const { return UsesGlobalOffsetTable; } + + /// isSelectExpensive - Return true if the select operation is expensive for + /// this target. + bool isSelectExpensive() const { return SelectIsExpensive; } + + /// isIntDivCheap() - Return true if integer divide is usually cheaper than + /// a sequence of several shifts, adds, and multiplies for this target. + bool isIntDivCheap() const { return IntDivIsCheap; } + + /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of + /// srl/add/sra. 
+ bool isPow2DivCheap() const { return Pow2DivIsCheap; } + + /// getSetCCResultType - Return the ValueType of the result of SETCC + /// operations. Also used to obtain the target's preferred type for + /// the condition operand of SELECT and BRCOND nodes. In the case of + /// BRCOND the argument passed is MVT::Other since there are no other + /// operands to get a type hint from. + virtual MVT getSetCCResultType(MVT VT) const; + + /// getBooleanContents - For targets without i1 registers, this gives the + /// nature of the high-bits of boolean values held in types wider than i1. + /// "Boolean values" are special true/false values produced by nodes like + /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND. + /// Not to be confused with general values promoted from i1. + BooleanContent getBooleanContents() const { return BooleanContents;} + + /// getSchedulingPreference - Return target scheduling preference. + SchedPreference getSchedulingPreference() const { + return SchedPreferenceInfo; + } + + /// getRegClassFor - Return the register class that should be used for the + /// specified value type. This may only be called on legal types. + TargetRegisterClass *getRegClassFor(MVT VT) const { + assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); + TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT()]; + assert(RC && "This value type is not natively supported!"); + return RC; + } + + /// isTypeLegal - Return true if the target has native support for the + /// specified value type. This means that it has a register that directly + /// holds it without promotions or expansions. 
+ bool isTypeLegal(MVT VT) const { + assert(!VT.isSimple() || + (unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); + return VT.isSimple() && RegClassForVT[VT.getSimpleVT()] != 0; + } + + class ValueTypeActionImpl { + /// ValueTypeActions - This is a bitvector that contains two bits for each + /// value type, where the two bits correspond to the LegalizeAction enum. + /// This can be queried with "getTypeAction(VT)". + uint32_t ValueTypeActions[2]; + public: + ValueTypeActionImpl() { + ValueTypeActions[0] = ValueTypeActions[1] = 0; + } + ValueTypeActionImpl(const ValueTypeActionImpl &RHS) { + ValueTypeActions[0] = RHS.ValueTypeActions[0]; + ValueTypeActions[1] = RHS.ValueTypeActions[1]; + } + + LegalizeAction getTypeAction(MVT VT) const { + if (VT.isExtended()) { + if (VT.isVector()) { + return VT.isPow2VectorType() ? Expand : Promote; + } + if (VT.isInteger()) + // First promote to a power-of-two size, then expand if necessary. + return VT == VT.getRoundIntegerType() ? Expand : Promote; + assert(0 && "Unsupported extended type!"); + return Legal; + } + unsigned I = VT.getSimpleVT(); + assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); + return (LegalizeAction)((ValueTypeActions[I>>4] >> ((2*I) & 31)) & 3); + } + void setTypeAction(MVT VT, LegalizeAction Action) { + unsigned I = VT.getSimpleVT(); + assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); + ValueTypeActions[I>>4] |= Action << ((I*2) & 31); + } + }; + + const ValueTypeActionImpl &getValueTypeActions() const { + return ValueTypeActions; + } + + /// getTypeAction - Return how we should legalize values of this type, either + /// it is already legal (return 'Legal') or we need to promote it to a larger + /// type (return 'Promote'), or we need to expand it into multiple registers + /// of smaller integer type (return 'Expand'). 'Custom' is not an option. 
+ LegalizeAction getTypeAction(MVT VT) const { + return ValueTypeActions.getTypeAction(VT); + } + + /// getTypeToTransformTo - For types supported by the target, this is an + /// identity function. For types that must be promoted to larger types, this + /// returns the larger type to promote to. For integer types that are larger + /// than the largest integer register, this contains one step in the expansion + /// to get to the smaller register. For illegal floating point types, this + /// returns the integer type to transform to. + MVT getTypeToTransformTo(MVT VT) const { + if (VT.isSimple()) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(TransformToType)); + MVT NVT = TransformToType[VT.getSimpleVT()]; + assert(getTypeAction(NVT) != Promote && + "Promote may not follow Expand or Promote"); + return NVT; + } + + if (VT.isVector()) { + MVT NVT = VT.getPow2VectorType(); + if (NVT == VT) { + // Vector length is a power of 2 - split to half the size. + unsigned NumElts = VT.getVectorNumElements(); + MVT EltVT = VT.getVectorElementType(); + return (NumElts == 1) ? EltVT : MVT::getVectorVT(EltVT, NumElts / 2); + } + // Promote to a power of two size, avoiding multi-step promotion. + return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT; + } else if (VT.isInteger()) { + MVT NVT = VT.getRoundIntegerType(); + if (NVT == VT) + // Size is a power of two - expand to half the size. + return MVT::getIntegerVT(VT.getSizeInBits() / 2); + else + // Promote to a power of two size, avoiding multi-step promotion. + return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT; + } + assert(0 && "Unsupported extended type!"); + return MVT(); // Not reached + } + + /// getTypeToExpandTo - For types supported by the target, this is an + /// identity function. For types that must be expanded (i.e. 
integer types + /// that are larger than the largest integer register or illegal floating + /// point types), this returns the largest legal type it will be expanded to. + MVT getTypeToExpandTo(MVT VT) const { + assert(!VT.isVector()); + while (true) { + switch (getTypeAction(VT)) { + case Legal: + return VT; + case Expand: + VT = getTypeToTransformTo(VT); + break; + default: + assert(false && "Type is not legal nor is it to be expanded!"); + return VT; + } + } + return VT; + } + + /// getVectorTypeBreakdown - Vector types are broken down into some number of + /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32 + /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack. + /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86. + /// + /// This method returns the number of registers needed, and the VT for each + /// register. It also returns the VT and quantity of the intermediate values + /// before they are promoted/expanded. + /// + unsigned getVectorTypeBreakdown(MVT VT, + MVT &IntermediateVT, + unsigned &NumIntermediates, + MVT &RegisterVT) const; + + /// getTgtMemIntrinsic: Given an intrinsic, checks if on the target the + /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If + /// this is the case, it returns true and store the intrinsic + /// information into the IntrinsicInfo that was passed to the function. + typedef struct IntrinsicInfo { + unsigned opc; // target opcode + MVT memVT; // memory VT + const Value* ptrVal; // value representing memory location + int offset; // offset off of ptrVal + unsigned align; // alignment + bool vol; // is volatile? + bool readMem; // reads memory? + bool writeMem; // writes memory? + } IntrinisicInfo; + + virtual bool getTgtMemIntrinsic(IntrinsicInfo& Info, + CallInst &I, unsigned Intrinsic) { + return false; + } + + /// getWidenVectorType: given a vector type, returns the type to widen to + /// (e.g., v7i8 to v8i8). 
If the vector type is legal, it returns itself. + /// If there is no vector type that we want to widen to, returns MVT::Other + /// When and were to widen is target dependent based on the cost of + /// scalarizing vs using the wider vector type. + virtual MVT getWidenVectorType(MVT VT) const; + + typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator; + legal_fpimm_iterator legal_fpimm_begin() const { + return LegalFPImmediates.begin(); + } + legal_fpimm_iterator legal_fpimm_end() const { + return LegalFPImmediates.end(); + } + + /// isShuffleMaskLegal - Targets can use this to indicate that they only + /// support *some* VECTOR_SHUFFLE operations, those with specific masks. + /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values + /// are assumed to be legal. + virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask, + MVT VT) const { + return true; + } + + /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is + /// used by Targets can use this to indicate if there is a suitable + /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant + /// pool entry. + virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, + MVT VT) const { + return false; + } + + /// getOperationAction - Return how this operation should be treated: either + /// it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. + LegalizeAction getOperationAction(unsigned Op, MVT VT) const { + if (VT.isExtended()) return Expand; + assert(Op < array_lengthof(OpActions) && + (unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((OpActions[Op] >> (2*VT.getSimpleVT())) & 3); + } + + /// isOperationLegalOrCustom - Return true if the specified operation is + /// legal on this target or can be made legal with custom lowering. 
This + /// is used to help guide high-level lowering decisions. + bool isOperationLegalOrCustom(unsigned Op, MVT VT) const { + return (VT == MVT::Other || isTypeLegal(VT)) && + (getOperationAction(Op, VT) == Legal || + getOperationAction(Op, VT) == Custom); + } + + /// isOperationLegal - Return true if the specified operation is legal on this + /// target. + bool isOperationLegal(unsigned Op, MVT VT) const { + return (VT == MVT::Other || isTypeLegal(VT)) && + getOperationAction(Op, VT) == Legal; + } + + /// getLoadExtAction - Return how this load with extension should be treated: + /// either it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. + LegalizeAction getLoadExtAction(unsigned LType, MVT VT) const { + assert(LType < array_lengthof(LoadExtActions) && + (unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((LoadExtActions[LType] >> (2*VT.getSimpleVT())) & 3); + } + + /// isLoadExtLegal - Return true if the specified load with extension is legal + /// on this target. + bool isLoadExtLegal(unsigned LType, MVT VT) const { + return VT.isSimple() && + (getLoadExtAction(LType, VT) == Legal || + getLoadExtAction(LType, VT) == Custom); + } + + /// getTruncStoreAction - Return how this store with truncation should be + /// treated: either it is legal, needs to be promoted to a larger size, needs + /// to be expanded to some other code sequence, or the target has a custom + /// expander for it. 
+ LegalizeAction getTruncStoreAction(MVT ValVT, + MVT MemVT) const { + assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) && + (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT()] >> + (2*MemVT.getSimpleVT())) & 3); + } + + /// isTruncStoreLegal - Return true if the specified store with truncation is + /// legal on this target. + bool isTruncStoreLegal(MVT ValVT, MVT MemVT) const { + return isTypeLegal(ValVT) && MemVT.isSimple() && + (getTruncStoreAction(ValVT, MemVT) == Legal || + getTruncStoreAction(ValVT, MemVT) == Custom); + } + + /// getIndexedLoadAction - Return how the indexed load should be treated: + /// either it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. + LegalizeAction + getIndexedLoadAction(unsigned IdxMode, MVT VT) const { + assert(IdxMode < array_lengthof(IndexedModeActions[0]) && + (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0][0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((IndexedModeActions[0][IdxMode] >> + (2*VT.getSimpleVT())) & 3); + } + + /// isIndexedLoadLegal - Return true if the specified indexed load is legal + /// on this target. + bool isIndexedLoadLegal(unsigned IdxMode, MVT VT) const { + return VT.isSimple() && + (getIndexedLoadAction(IdxMode, VT) == Legal || + getIndexedLoadAction(IdxMode, VT) == Custom); + } + + /// getIndexedStoreAction - Return how the indexed store should be treated: + /// either it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. 
+ LegalizeAction + getIndexedStoreAction(unsigned IdxMode, MVT VT) const { + assert(IdxMode < array_lengthof(IndexedModeActions[1]) && + (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((IndexedModeActions[1][IdxMode] >> + (2*VT.getSimpleVT())) & 3); + } + + /// isIndexedStoreLegal - Return true if the specified indexed load is legal + /// on this target. + bool isIndexedStoreLegal(unsigned IdxMode, MVT VT) const { + return VT.isSimple() && + (getIndexedStoreAction(IdxMode, VT) == Legal || + getIndexedStoreAction(IdxMode, VT) == Custom); + } + + /// getConvertAction - Return how the conversion should be treated: + /// either it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. + LegalizeAction + getConvertAction(MVT FromVT, MVT ToVT) const { + assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && + (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT()] >> + (2*ToVT.getSimpleVT())) & 3); + } + + /// isConvertLegal - Return true if the specified conversion is legal + /// on this target. + bool isConvertLegal(MVT FromVT, MVT ToVT) const { + return isTypeLegal(FromVT) && isTypeLegal(ToVT) && + (getConvertAction(FromVT, ToVT) == Legal || + getConvertAction(FromVT, ToVT) == Custom); + } + + /// getCondCodeAction - Return how the condition code should be treated: + /// either it is legal, needs to be expanded to some other code sequence, + /// or the target has a custom expander for it. 
+ LegalizeAction + getCondCodeAction(ISD::CondCode CC, MVT VT) const { + assert((unsigned)CC < array_lengthof(CondCodeActions) && + (unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 && + "Table isn't big enough!"); + LegalizeAction Action = (LegalizeAction) + ((CondCodeActions[CC] >> (2*VT.getSimpleVT())) & 3); + assert(Action != Promote && "Can't promote condition code!"); + return Action; + } + + /// isCondCodeLegal - Return true if the specified condition code is legal + /// on this target. + bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const { + return getCondCodeAction(CC, VT) == Legal || + getCondCodeAction(CC, VT) == Custom; + } + + + /// getTypeToPromoteTo - If the action for this operation is to promote, this + /// method returns the ValueType to promote to. + MVT getTypeToPromoteTo(unsigned Op, MVT VT) const { + assert(getOperationAction(Op, VT) == Promote && + "This operation isn't promoted!"); + + // See if this has an explicit type specified. + std::map<std::pair<unsigned, MVT::SimpleValueType>, + MVT::SimpleValueType>::const_iterator PTTI = + PromoteToType.find(std::make_pair(Op, VT.getSimpleVT())); + if (PTTI != PromoteToType.end()) return PTTI->second; + + assert((VT.isInteger() || VT.isFloatingPoint()) && + "Cannot autopromote this type, add it with AddPromotedToType."); + + MVT NVT = VT; + do { + NVT = (MVT::SimpleValueType)(NVT.getSimpleVT()+1); + assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && + "Didn't find type to promote to!"); + } while (!isTypeLegal(NVT) || + getOperationAction(Op, NVT) == Promote); + return NVT; + } + + /// getValueType - Return the MVT corresponding to this LLVM type. + /// This is fixed by the LLVM operations except for the pointer size. If + /// AllowUnknown is true, this will return MVT::Other for types with no MVT + /// counterpart (e.g. structs), otherwise it will assert. 
+ MVT getValueType(const Type *Ty, bool AllowUnknown = false) const { + MVT VT = MVT::getMVT(Ty, AllowUnknown); + return VT == MVT::iPTR ? PointerTy : VT; + } + + /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate + /// function arguments in the caller parameter area. This is the actual + /// alignment, not its logarithm. + virtual unsigned getByValTypeAlignment(const Type *Ty) const; + + /// getRegisterType - Return the type of registers that this ValueType will + /// eventually require. + MVT getRegisterType(MVT VT) const { + if (VT.isSimple()) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(RegisterTypeForVT)); + return RegisterTypeForVT[VT.getSimpleVT()]; + } + if (VT.isVector()) { + MVT VT1, RegisterVT; + unsigned NumIntermediates; + (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT); + return RegisterVT; + } + if (VT.isInteger()) { + return getRegisterType(getTypeToTransformTo(VT)); + } + assert(0 && "Unsupported extended type!"); + return MVT(); // Not reached + } + + /// getNumRegisters - Return the number of registers that this ValueType will + /// eventually require. This is one for any types promoted to live in larger + /// registers, but may be more than one for types (like i64) that are split + /// into pieces. For types like i140, which are first promoted then expanded, + /// it is the number of registers needed to hold all the bits of the original + /// type. For an i140 on a 32 bit machine this means 5 registers. 
+ unsigned getNumRegisters(MVT VT) const { + if (VT.isSimple()) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(NumRegistersForVT)); + return NumRegistersForVT[VT.getSimpleVT()]; + } + if (VT.isVector()) { + MVT VT1, VT2; + unsigned NumIntermediates; + return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2); + } + if (VT.isInteger()) { + unsigned BitWidth = VT.getSizeInBits(); + unsigned RegWidth = getRegisterType(VT).getSizeInBits(); + return (BitWidth + RegWidth - 1) / RegWidth; + } + assert(0 && "Unsupported extended type!"); + return 0; // Not reached + } + + /// ShouldShrinkFPConstant - If true, then instruction selection should + /// seek to shrink the FP constant of the specified type to a smaller type + /// in order to save space and / or reduce runtime. + virtual bool ShouldShrinkFPConstant(MVT VT) const { return true; } + + /// hasTargetDAGCombine - If true, the target has custom DAG combine + /// transformations that it can perform for the specified node. + bool hasTargetDAGCombine(ISD::NodeType NT) const { + assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); + return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7)); + } + + /// This function returns the maximum number of store operations permitted + /// to replace a call to llvm.memset. The value is set by the target at the + /// performance threshold for such a replacement. + /// @brief Get maximum # of store operations permitted for llvm.memset + unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; } + + /// This function returns the maximum number of store operations permitted + /// to replace a call to llvm.memcpy. The value is set by the target at the + /// performance threshold for such a replacement. 
+ /// @brief Get maximum # of store operations permitted for llvm.memcpy + unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; } + + /// This function returns the maximum number of store operations permitted + /// to replace a call to llvm.memmove. The value is set by the target at the + /// performance threshold for such a replacement. + /// @brief Get maximum # of store operations permitted for llvm.memmove + unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; } + + /// This function returns true if the target allows unaligned memory accesses. + /// This is used, for example, in situations where an array copy/move/set is + /// converted to a sequence of store operations. It's use helps to ensure that + /// such replacements don't generate code that causes an alignment error + /// (trap) on the target machine. + /// @brief Determine if the target supports unaligned memory accesses. + bool allowsUnalignedMemoryAccesses() const { + return allowUnalignedMemoryAccesses; + } + + /// This function returns true if the target would benefit from code placement + /// optimization. + /// @brief Determine if the target should perform code placement optimization. + bool shouldOptimizeCodePlacement() const { + return benefitFromCodePlacementOpt; + } + + /// getOptimalMemOpType - Returns the target specific optimal type for load + /// and store operations as a result of memset, memcpy, and memmove lowering. + /// It returns MVT::iAny if SelectionDAG should be responsible for + /// determining it. + virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align, + bool isSrcConst, bool isSrcStr) const { + return MVT::iAny; + } + + /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp + /// to implement llvm.setjmp. + bool usesUnderscoreSetJmp() const { + return UseUnderscoreSetJmp; + } + + /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp + /// to implement llvm.longjmp. 
+ bool usesUnderscoreLongJmp() const { + return UseUnderscoreLongJmp; + } + + /// getStackPointerRegisterToSaveRestore - If a physical register, this + /// specifies the register that llvm.savestack/llvm.restorestack should save + /// and restore. + unsigned getStackPointerRegisterToSaveRestore() const { + return StackPointerRegisterToSaveRestore; + } + + /// getExceptionAddressRegister - If a physical register, this returns + /// the register that receives the exception address on entry to a landing + /// pad. + unsigned getExceptionAddressRegister() const { + return ExceptionPointerRegister; + } + + /// getExceptionSelectorRegister - If a physical register, this returns + /// the register that receives the exception typeid on entry to a landing + /// pad. + unsigned getExceptionSelectorRegister() const { + return ExceptionSelectorRegister; + } + + /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never + /// set, the default is 200) + unsigned getJumpBufSize() const { + return JumpBufSize; + } + + /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes + /// (if never set, the default is 0) + unsigned getJumpBufAlignment() const { + return JumpBufAlignment; + } + + /// getIfCvtBlockLimit - returns the target specific if-conversion block size + /// limit. Any block whose size is greater should not be predicated. + unsigned getIfCvtBlockSizeLimit() const { + return IfCvtBlockSizeLimit; + } + + /// getIfCvtDupBlockLimit - returns the target specific size limit for a + /// block to be considered for duplication. Any block whose size is greater + /// should not be duplicated to facilitate its predication. + unsigned getIfCvtDupBlockSizeLimit() const { + return IfCvtDupBlockSizeLimit; + } + + /// getPrefLoopAlignment - return the preferred loop alignment. 
+ /// + unsigned getPrefLoopAlignment() const { + return PrefLoopAlignment; + } + + /// getPreIndexedAddressParts - returns true by value, base pointer and + /// offset pointer and addressing mode by reference if the node's address + /// can be legally represented as pre-indexed load / store address. + virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, + SDValue &Offset, + ISD::MemIndexedMode &AM, + SelectionDAG &DAG) const { + return false; + } + + /// getPostIndexedAddressParts - returns true by value, base pointer and + /// offset pointer and addressing mode by reference if this node can be + /// combined with a load / store to form a post-indexed load / store. + virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, + SDValue &Base, SDValue &Offset, + ISD::MemIndexedMode &AM, + SelectionDAG &DAG) const { + return false; + } + + /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC + /// jumptable. + virtual SDValue getPICJumpTableRelocBase(SDValue Table, + SelectionDAG &DAG) const; + + /// isOffsetFoldingLegal - Return true if folding a constant offset + /// with the given GlobalAddress is legal. It is frequently not legal in + /// PIC relocation models. + virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; + + //===--------------------------------------------------------------------===// + // TargetLowering Optimization Methods + // + + /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two + /// SDValues for returning information from TargetLowering to its clients + /// that want to combine + struct TargetLoweringOpt { + SelectionDAG &DAG; + SDValue Old; + SDValue New; + + explicit TargetLoweringOpt(SelectionDAG &InDAG) : DAG(InDAG) {} + + bool CombineTo(SDValue O, SDValue N) { + Old = O; + New = N; + return true; + } + + /// ShrinkDemandedConstant - Check to see if the specified operand of the + /// specified instruction is a constant integer. 
If so, check to see if + /// there are any bits set in the constant that are not demanded. If so, + /// shrink the constant and return true. + bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded); + + /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the + /// casts are free. This uses isZExtFree and ZERO_EXTEND for the widening + /// cast, but it could be generalized for targets with other types of + /// implicit widening casts. + bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded, + DebugLoc dl); + }; + + /// SimplifyDemandedBits - Look at Op. At this point, we know that only the + /// DemandedMask bits of the result of Op are ever used downstream. If we can + /// use this information to simplify Op, create a new simplified DAG node and + /// return true, returning the original and new nodes in Old and New. + /// Otherwise, analyze the expression and return a mask of KnownOne and + /// KnownZero bits for the expression (used to simplify the caller). + /// The KnownZero/One bits may only be accurate for those bits in the + /// DemandedMask. + bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, + APInt &KnownZero, APInt &KnownOne, + TargetLoweringOpt &TLO, unsigned Depth = 0) const; + + /// computeMaskedBitsForTargetNode - Determine which of the bits specified in + /// Mask are known to be either zero or one and return them in the + /// KnownZero/KnownOne bitsets. + virtual void computeMaskedBitsForTargetNode(const SDValue Op, + const APInt &Mask, + APInt &KnownZero, + APInt &KnownOne, + const SelectionDAG &DAG, + unsigned Depth = 0) const; + + /// ComputeNumSignBitsForTargetNode - This method can be implemented by + /// targets that want to expose additional information about sign bits to the + /// DAG Combiner. + virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, + unsigned Depth = 0) const; + + struct DAGCombinerInfo { + void *DC; // The DAG Combiner object. 
+ bool BeforeLegalize; + bool CalledByLegalizer; + public: + SelectionDAG &DAG; + + DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc) + : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {} + + bool isBeforeLegalize() const { return BeforeLegalize; } + bool isCalledByLegalizer() const { return CalledByLegalizer; } + + void AddToWorklist(SDNode *N); + SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To, + bool AddTo = true); + SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true); + SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true); + + void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO); + }; + + /// SimplifySetCC - Try to simplify a setcc built with the specified operands + /// and cc. If it is unable to simplify it, return a null SDValue. + SDValue SimplifySetCC(MVT VT, SDValue N0, SDValue N1, + ISD::CondCode Cond, bool foldBooleans, + DAGCombinerInfo &DCI, DebugLoc dl) const; + + /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the + /// node is a GlobalAddress + offset. + virtual bool + isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const; + + /// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is + /// loading 'Bytes' bytes from a location that is 'Dist' units away from the + /// location that the 'Base' load is loading from. + bool isConsecutiveLoad(SDNode *LD, SDNode *Base, unsigned Bytes, int Dist, + const MachineFrameInfo *MFI) const; + + /// PerformDAGCombine - This method will be invoked for all target nodes and + /// for any target-independent nodes that the target has registered with + /// invoke it for. + /// + /// The semantics are as follows: + /// Return Value: + /// SDValue.Val == 0 - No change was made + /// SDValue.Val == N - N was replaced, is dead, and is already handled. + /// otherwise - N should be replaced by the returned Operand. 
+ /// + /// In addition, methods provided by DAGCombinerInfo may be used to perform + /// more complex transformations. + /// + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + + //===--------------------------------------------------------------------===// + // TargetLowering Configuration Methods - These methods should be invoked by + // the derived class constructor to configure this object for the target. + // + +protected: + /// setUsesGlobalOffsetTable - Specify that this target does or doesn't use a + /// GOT for PC-relative code. + void setUsesGlobalOffsetTable(bool V) { UsesGlobalOffsetTable = V; } + + /// setShiftAmountType - Describe the type that should be used for shift + /// amounts. This type defaults to the pointer type. + void setShiftAmountType(MVT VT) { ShiftAmountTy = VT; } + + /// setBooleanContents - Specify how the target extends the result of a + /// boolean value from i1 to a wider type. See getBooleanContents. + void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; } + + /// setSchedulingPreference - Specify the target scheduling preference. + void setSchedulingPreference(SchedPreference Pref) { + SchedPreferenceInfo = Pref; + } + + /// setShiftAmountFlavor - Describe how the target handles out of range shift + /// amounts. + void setShiftAmountFlavor(OutOfRangeShiftAmount OORSA) { + ShiftAmtHandling = OORSA; + } + + /// setUseUnderscoreSetJmp - Indicate whether this target prefers to + /// use _setjmp to implement llvm.setjmp or the non _ version. + /// Defaults to false. + void setUseUnderscoreSetJmp(bool Val) { + UseUnderscoreSetJmp = Val; + } + + /// setUseUnderscoreLongJmp - Indicate whether this target prefers to + /// use _longjmp to implement llvm.longjmp or the non _ version. + /// Defaults to false. 
+ void setUseUnderscoreLongJmp(bool Val) { + UseUnderscoreLongJmp = Val; + } + + /// setStackPointerRegisterToSaveRestore - If set to a physical register, this + /// specifies the register that llvm.savestack/llvm.restorestack should save + /// and restore. + void setStackPointerRegisterToSaveRestore(unsigned R) { + StackPointerRegisterToSaveRestore = R; + } + + /// setExceptionPointerRegister - If set to a physical register, this sets + /// the register that receives the exception address on entry to a landing + /// pad. + void setExceptionPointerRegister(unsigned R) { + ExceptionPointerRegister = R; + } + + /// setExceptionSelectorRegister - If set to a physical register, this sets + /// the register that receives the exception typeid on entry to a landing + /// pad. + void setExceptionSelectorRegister(unsigned R) { + ExceptionSelectorRegister = R; + } + + /// SelectIsExpensive - Tells the code generator not to expand operations + /// into sequences that use the select operations if possible. + void setSelectIsExpensive() { SelectIsExpensive = true; } + + /// setIntDivIsCheap - Tells the code generator that integer divide is + /// expensive, and if possible, should be replaced by an alternate sequence + /// of instructions not containing an integer divide. + void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; } + + /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate + /// srl/add/sra for a signed divide by power of two, and let the target handle + /// it. + void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; } + + /// addRegisterClass - Add the specified register class as an available + /// regclass for the specified value type. This indicates the selector can + /// handle values of that class natively. 
+ void addRegisterClass(MVT VT, TargetRegisterClass *RC) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); + AvailableRegClasses.push_back(std::make_pair(VT, RC)); + RegClassForVT[VT.getSimpleVT()] = RC; + } + + /// computeRegisterProperties - Once all of the register classes are added, + /// this allows us to compute derived properties we expose. + void computeRegisterProperties(); + + /// setOperationAction - Indicate that the specified operation does not work + /// with the specified type and indicate what to do about it. + void setOperationAction(unsigned Op, MVT VT, + LegalizeAction Action) { + assert((unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 && + Op < array_lengthof(OpActions) && "Table isn't big enough!"); + OpActions[Op] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + OpActions[Op] |= (uint64_t)Action << VT.getSimpleVT()*2; + } + + /// setLoadExtAction - Indicate that the specified load with extension does + /// not work with the with specified type and indicate what to do about it. + void setLoadExtAction(unsigned ExtType, MVT VT, + LegalizeAction Action) { + assert((unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 && + ExtType < array_lengthof(LoadExtActions) && + "Table isn't big enough!"); + LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + LoadExtActions[ExtType] |= (uint64_t)Action << VT.getSimpleVT()*2; + } + + /// setTruncStoreAction - Indicate that the specified truncating store does + /// not work with the with specified type and indicate what to do about it. 
+ void setTruncStoreAction(MVT ValVT, MVT MemVT, + LegalizeAction Action) { + assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) && + (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 && + "Table isn't big enough!"); + TruncStoreActions[ValVT.getSimpleVT()] &= ~(uint64_t(3UL) << + MemVT.getSimpleVT()*2); + TruncStoreActions[ValVT.getSimpleVT()] |= (uint64_t)Action << + MemVT.getSimpleVT()*2; + } + + /// setIndexedLoadAction - Indicate that the specified indexed load does or + /// does not work with the with specified type and indicate what to do abort + /// it. NOTE: All indexed mode loads are initialized to Expand in + /// TargetLowering.cpp + void setIndexedLoadAction(unsigned IdxMode, MVT VT, + LegalizeAction Action) { + assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0])*4 && + IdxMode < array_lengthof(IndexedModeActions[0]) && + "Table isn't big enough!"); + IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; + } + + /// setIndexedStoreAction - Indicate that the specified indexed store does or + /// does not work with the with specified type and indicate what to do about + /// it. NOTE: All indexed mode stores are initialized to Expand in + /// TargetLowering.cpp + void setIndexedStoreAction(unsigned IdxMode, MVT VT, + LegalizeAction Action) { + assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 && + IdxMode < array_lengthof(IndexedModeActions[1]) && + "Table isn't big enough!"); + IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; + } + + /// setConvertAction - Indicate that the specified conversion does or does + /// not work with the with specified type and indicate what to do about it. 
+ void setConvertAction(MVT FromVT, MVT ToVT, + LegalizeAction Action) { + assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && + (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && + "Table isn't big enough!"); + ConvertActions[FromVT.getSimpleVT()] &= ~(uint64_t(3UL) << + ToVT.getSimpleVT()*2); + ConvertActions[FromVT.getSimpleVT()] |= (uint64_t)Action << + ToVT.getSimpleVT()*2; + } + + /// setCondCodeAction - Indicate that the specified condition code is or isn't + /// supported on the target and indicate what to do about it. + void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action) { + assert((unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 && + (unsigned)CC < array_lengthof(CondCodeActions) && + "Table isn't big enough!"); + CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.getSimpleVT()*2; + } + + /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the + /// promotion code defaults to trying a larger integer/fp until it can find + /// one that works. If that default is insufficient, this method can be used + /// by the target to override the default. + void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { + PromoteToType[std::make_pair(Opc, OrigVT.getSimpleVT())] = + DestVT.getSimpleVT(); + } + + /// addLegalFPImmediate - Indicate that this target can instruction select + /// the specified FP immediate natively. + void addLegalFPImmediate(const APFloat& Imm) { + LegalFPImmediates.push_back(Imm); + } + + /// setTargetDAGCombine - Targets should invoke this method for each target + /// independent node that they want to provide a custom DAG combiner for by + /// implementing the PerformDAGCombine virtual method. 
+ void setTargetDAGCombine(ISD::NodeType NT) { + assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); + TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7); + } + + /// setJumpBufSize - Set the target's required jmp_buf buffer size (in + /// bytes); default is 200 + void setJumpBufSize(unsigned Size) { + JumpBufSize = Size; + } + + /// setJumpBufAlignment - Set the target's required jmp_buf buffer + /// alignment (in bytes); default is 0 + void setJumpBufAlignment(unsigned Align) { + JumpBufAlignment = Align; + } + + /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size + /// limit (in number of instructions); default is 2. + void setIfCvtBlockSizeLimit(unsigned Limit) { + IfCvtBlockSizeLimit = Limit; + } + + /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number + /// of instructions) to be considered for code duplication during + /// if-conversion; default is 2. + void setIfCvtDupBlockSizeLimit(unsigned Limit) { + IfCvtDupBlockSizeLimit = Limit; + } + + /// setPrefLoopAlignment - Set the target's preferred loop alignment. Default + /// alignment is zero, it means the target does not care about loop alignment. + void setPrefLoopAlignment(unsigned Align) { + PrefLoopAlignment = Align; + } + +public: + + virtual const TargetSubtarget *getSubtarget() { + assert(0 && "Not Implemented"); + return NULL; // this is here to silence compiler errors + } + //===--------------------------------------------------------------------===// + // Lowering methods - These methods must be implemented by targets so that + // the SelectionDAGLowering code knows how to lower these. + // + + /// LowerArguments - This hook must be implemented to indicate how we should + /// lower the arguments for the specified function, into the specified DAG. 
+ virtual void + LowerArguments(Function &F, SelectionDAG &DAG, + SmallVectorImpl<SDValue>& ArgValues, DebugLoc dl); + + /// LowerCallTo - This hook lowers an abstract call to a function into an + /// actual call. This returns a pair of operands. The first element is the + /// return value for the function (if RetTy is not VoidTy). The second + /// element is the outgoing token chain. + struct ArgListEntry { + SDValue Node; + const Type* Ty; + bool isSExt : 1; + bool isZExt : 1; + bool isInReg : 1; + bool isSRet : 1; + bool isNest : 1; + bool isByVal : 1; + uint16_t Alignment; + + ArgListEntry() : isSExt(false), isZExt(false), isInReg(false), + isSRet(false), isNest(false), isByVal(false), Alignment(0) { } + }; + typedef std::vector<ArgListEntry> ArgListTy; + virtual std::pair<SDValue, SDValue> + LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt, + bool isVarArg, bool isInreg, unsigned CallingConv, + bool isTailCall, SDValue Callee, ArgListTy &Args, + SelectionDAG &DAG, DebugLoc dl); + + /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a + /// memcpy. This can be used by targets to provide code sequences for cases + /// that don't fit the target's parameters for simple loads/stores and can be + /// more efficient than using a library call. This function can return a null + /// SDValue if the target declines to use custom code and a different + /// lowering strategy should be used. + /// + /// If AlwaysInline is true, the size is constant and the target should not + /// emit any calls and is strongly encouraged to attempt to emit inline code + /// even if it is beyond the usual threshold because this intrinsic is being + /// expanded in a place where calls are not feasible (e.g. within the prologue + /// for another call). If the target chooses to decline an AlwaysInline + /// request here, legalize will resort to using simple loads and stores. 
+ virtual SDValue + EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, + SDValue Chain, + SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, + bool AlwaysInline, + const Value *DstSV, uint64_t DstOff, + const Value *SrcSV, uint64_t SrcOff) { + return SDValue(); + } + + /// EmitTargetCodeForMemmove - Emit target-specific code that performs a + /// memmove. This can be used by targets to provide code sequences for cases + /// that don't fit the target's parameters for simple loads/stores and can be + /// more efficient than using a library call. This function can return a null + /// SDValue if the target declines to use custom code and a different + /// lowering strategy should be used. + virtual SDValue + EmitTargetCodeForMemmove(SelectionDAG &DAG, DebugLoc dl, + SDValue Chain, + SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, + const Value *DstSV, uint64_t DstOff, + const Value *SrcSV, uint64_t SrcOff) { + return SDValue(); + } + + /// EmitTargetCodeForMemset - Emit target-specific code that performs a + /// memset. This can be used by targets to provide code sequences for cases + /// that don't fit the target's parameters for simple stores and can be more + /// efficient than using a library call. This function can return a null + /// SDValue if the target declines to use custom code and a different + /// lowering strategy should be used. + virtual SDValue + EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, + SDValue Chain, + SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, + const Value *DstSV, uint64_t DstOff) { + return SDValue(); + } + + /// LowerOperationWrapper - This callback is invoked by the type legalizer + /// to legalize nodes with an illegal operand type but legal result types. + /// It replaces the LowerOperation callback in the type Legalizer. + /// The reason we can not do away with LowerOperation entirely is that + /// LegalizeDAG isn't yet ready to use this callback. 
+ /// TODO: Consider merging with ReplaceNodeResults. + + /// The target places new result values for the node in Results (their number + /// and types must exactly match those of the original return values of + /// the node), or leaves Results empty, which indicates that the node is not + /// to be custom lowered after all. + /// The default implementation calls LowerOperation. + virtual void LowerOperationWrapper(SDNode *N, + SmallVectorImpl<SDValue> &Results, + SelectionDAG &DAG); + + /// LowerOperation - This callback is invoked for operations that are + /// unsupported by the target, which are registered to use 'custom' lowering, + /// and whose defined values are all legal. + /// If the target has no operations that require custom lowering, it need not + /// implement this. The default implementation of this aborts. + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); + + /// ReplaceNodeResults - This callback is invoked when a node result type is + /// illegal for the target, and the operation was registered to use 'custom' + /// lowering for that result type. The target places new result values for + /// the node in Results (their number and types must exactly match those of + /// the original return values of the node), or leaves Results empty, which + /// indicates that the node is not to be custom lowered after all. + /// + /// If the target has no operations that require custom lowering, it need not + /// implement this. The default implementation aborts. + virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, + SelectionDAG &DAG) { + assert(0 && "ReplaceNodeResults not implemented for this target!"); + } + + /// IsEligibleForTailCallOptimization - Check whether the call is eligible for + /// tail call optimization. Targets which want to do tail call optimization + /// should override this function. 
+ virtual bool IsEligibleForTailCallOptimization(CallSDNode *Call, + SDValue Ret, + SelectionDAG &DAG) const { + return false; + } + + /// CheckTailCallReturnConstraints - Check whether CALL node immediatly + /// preceeds the RET node and whether the return uses the result of the node + /// or is a void return. This function can be used by the target to determine + /// eligiblity of tail call optimization. + static bool CheckTailCallReturnConstraints(CallSDNode *TheCall, SDValue Ret); + + /// GetPossiblePreceedingTailCall - Get preceeding TailCallNodeOpCode node if + /// it exists. Skip a possible ISD::TokenFactor. + static SDValue GetPossiblePreceedingTailCall(SDValue Chain, + unsigned TailCallNodeOpCode) { + if (Chain.getOpcode() == TailCallNodeOpCode) { + return Chain; + } else if (Chain.getOpcode() == ISD::TokenFactor) { + if (Chain.getNumOperands() && + Chain.getOperand(0).getOpcode() == TailCallNodeOpCode) + return Chain.getOperand(0); + } + return Chain; + } + + /// getTargetNodeName() - This method returns the name of a target specific + /// DAG node. + virtual const char *getTargetNodeName(unsigned Opcode) const; + + /// createFastISel - This method returns a target specific FastISel object, + /// or null if the target does not support "fast" ISel. + virtual FastISel * + createFastISel(MachineFunction &, + MachineModuleInfo *, DwarfWriter *, + DenseMap<const Value *, unsigned> &, + DenseMap<const BasicBlock *, MachineBasicBlock *> &, + DenseMap<const AllocaInst *, int> & +#ifndef NDEBUG + , SmallSet<Instruction*, 8> &CatchInfoLost +#endif + ) { + return 0; + } + + //===--------------------------------------------------------------------===// + // Inline Asm Support hooks + // + + enum ConstraintType { + C_Register, // Constraint represents specific register(s). + C_RegisterClass, // Constraint represents any of register(s) in class. + C_Memory, // Memory constraint. + C_Other, // Something else. + C_Unknown // Unsupported constraint. 
+ }; + + /// AsmOperandInfo - This contains information for each constraint that we are + /// lowering. + struct AsmOperandInfo : public InlineAsm::ConstraintInfo { + /// ConstraintCode - This contains the actual string for the code, like "m". + /// TargetLowering picks the 'best' code from ConstraintInfo::Codes that + /// most closely matches the operand. + std::string ConstraintCode; + + /// ConstraintType - Information about the constraint code, e.g. Register, + /// RegisterClass, Memory, Other, Unknown. + TargetLowering::ConstraintType ConstraintType; + + /// CallOperandval - If this is the result output operand or a + /// clobber, this is null, otherwise it is the incoming operand to the + /// CallInst. This gets modified as the asm is processed. + Value *CallOperandVal; + + /// ConstraintVT - The ValueType for the operand value. + MVT ConstraintVT; + + /// isMatchingInputConstraint - Return true of this is an input operand that + /// is a matching constraint like "4". + bool isMatchingInputConstraint() const; + + /// getMatchedOperand - If this is an input matching constraint, this method + /// returns the output operand it matches. + unsigned getMatchedOperand() const; + + AsmOperandInfo(const InlineAsm::ConstraintInfo &info) + : InlineAsm::ConstraintInfo(info), + ConstraintType(TargetLowering::C_Unknown), + CallOperandVal(0), ConstraintVT(MVT::Other) { + } + }; + + /// ComputeConstraintToUse - Determines the constraint code and constraint + /// type to use for the specific AsmOperandInfo, setting + /// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand + /// being passed in is available, it can be passed in as Op, otherwise an + /// empty SDValue can be passed. If hasMemory is true it means one of the asm + /// constraint of the inline asm instruction being processed is 'm'. 
+ virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, + SDValue Op, + bool hasMemory, + SelectionDAG *DAG = 0) const; + + /// getConstraintType - Given a constraint, return the type of constraint it + /// is for this target. + virtual ConstraintType getConstraintType(const std::string &Constraint) const; + + /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"), + /// return a list of registers that can be used to satisfy the constraint. + /// This should only be used for C_RegisterClass constraints. + virtual std::vector<unsigned> + getRegClassForInlineAsmConstraint(const std::string &Constraint, + MVT VT) const; + + /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g. + /// {edx}), return the register number and the register class for the + /// register. + /// + /// Given a register class constraint, like 'r', if this corresponds directly + /// to an LLVM register class, return a register of 0 and the register class + /// pointer. + /// + /// This should only be used for C_Register constraints. On error, + /// this returns a register number of 0 and a null register class pointer.. + virtual std::pair<unsigned, const TargetRegisterClass*> + getRegForInlineAsmConstraint(const std::string &Constraint, + MVT VT) const; + + /// LowerXConstraint - try to replace an X constraint, which matches anything, + /// with another that has more specific requirements based on the type of the + /// corresponding operand. This returns null if there is no replacement to + /// make. + virtual const char *LowerXConstraint(MVT ConstraintVT) const; + + /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops + /// vector. If it is invalid, don't add anything to Ops. If hasMemory is true + /// it means one of the asm constraint of the inline asm instruction being + /// processed is 'm'. 
+ virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter, + bool hasMemory, + std::vector<SDValue> &Ops, + SelectionDAG &DAG) const; + + //===--------------------------------------------------------------------===// + // Scheduler hooks + // + + // EmitInstrWithCustomInserter - This method should be implemented by targets + // that mark instructions with the 'usesCustomDAGSchedInserter' flag. These + // instructions are special in various ways, which require special support to + // insert. The specified MachineInstr is created but not inserted into any + // basic blocks, and the scheduler passes ownership of it to this method. + virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, + MachineBasicBlock *MBB) const; + + //===--------------------------------------------------------------------===// + // Addressing mode description hooks (used by LSR etc). + // + + /// AddrMode - This represents an addressing mode of: + /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + /// If BaseGV is null, there is no BaseGV. + /// If BaseOffs is zero, there is no base offset. + /// If HasBaseReg is false, there is no base register. + /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with + /// no scale. + /// + struct AddrMode { + GlobalValue *BaseGV; + int64_t BaseOffs; + bool HasBaseReg; + int64_t Scale; + AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {} + }; + + /// isLegalAddressingMode - Return true if the addressing mode represented by + /// AM is legal for this target, for a load/store of the specified type. + /// The type may be VoidTy, in which case only return true if the addressing + /// mode is legal for a load/store of any legal type. + /// TODO: Handle pre/postinc as well. + virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const; + + /// isTruncateFree - Return true if it's free to truncate a value of + /// type Ty1 to type Ty2. e.g. 
On x86 it's free to truncate a i32 value in + /// register EAX to i16 by referencing its sub-register AX. + virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const { + return false; + } + + virtual bool isTruncateFree(MVT VT1, MVT VT2) const { + return false; + } + + /// isZExtFree - Return true if any actual instruction that defines a + /// value of type Ty1 implicit zero-extends the value to Ty2 in the result + /// register. This does not necessarily include registers defined in + /// unknown ways, such as incoming arguments, or copies from unknown + /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this + /// does not necessarily apply to truncate instructions. e.g. on x86-64, + /// all instructions that define 32-bit values implicit zero-extend the + /// result out to 64 bits. + virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const { + return false; + } + + virtual bool isZExtFree(MVT VT1, MVT VT2) const { + return false; + } + + /// isNarrowingProfitable - Return true if it's profitable to narrow + /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow + /// from i32 to i8 but not from i32 to i16. + virtual bool isNarrowingProfitable(MVT VT1, MVT VT2) const { + return false; + } + + //===--------------------------------------------------------------------===// + // Div utility functions + // + SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, + std::vector<SDNode*>* Created) const; + SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, + std::vector<SDNode*>* Created) const; + + + //===--------------------------------------------------------------------===// + // Runtime Library hooks + // + + /// setLibcallName - Rename the default libcall routine name for the specified + /// libcall. + void setLibcallName(RTLIB::Libcall Call, const char *Name) { + LibcallRoutineNames[Call] = Name; + } + + /// getLibcallName - Get the libcall routine name for the specified libcall. 
+ /// + const char *getLibcallName(RTLIB::Libcall Call) const { + return LibcallRoutineNames[Call]; + } + + /// setCmpLibcallCC - Override the default CondCode to be used to test the + /// result of the comparison libcall against zero. + void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) { + CmpLibcallCCs[Call] = CC; + } + + /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of + /// the comparison libcall against zero. + ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const { + return CmpLibcallCCs[Call]; + } + +private: + TargetMachine &TM; + const TargetData *TD; + + /// PointerTy - The type to use for pointers, usually i32 or i64. + /// + MVT PointerTy; + + /// IsLittleEndian - True if this is a little endian target. + /// + bool IsLittleEndian; + + /// UsesGlobalOffsetTable - True if this target uses a GOT for PIC codegen. + /// + bool UsesGlobalOffsetTable; + + /// SelectIsExpensive - Tells the code generator not to expand operations + /// into sequences that use the select operations if possible. + bool SelectIsExpensive; + + /// IntDivIsCheap - Tells the code generator not to expand integer divides by + /// constants into a sequence of muls, adds, and shifts. This is a hack until + /// a real cost model is in place. If we ever optimize for size, this will be + /// set to true unconditionally. + bool IntDivIsCheap; + + /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate + /// srl/add/sra for a signed divide by power of two, and let the target handle + /// it. + bool Pow2DivIsCheap; + + /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement + /// llvm.setjmp. Defaults to false. + bool UseUnderscoreSetJmp; + + /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement + /// llvm.longjmp. Defaults to false. + bool UseUnderscoreLongJmp; + + /// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever + /// PointerTy is. 
+ MVT ShiftAmountTy; + + OutOfRangeShiftAmount ShiftAmtHandling; + + /// BooleanContents - Information about the contents of the high-bits in + /// boolean values held in a type wider than i1. See getBooleanContents. + BooleanContent BooleanContents; + + /// SchedPreferenceInfo - The target scheduling preference: shortest possible + /// total cycles or lowest register usage. + SchedPreference SchedPreferenceInfo; + + /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers + unsigned JumpBufSize; + + /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf + /// buffers + unsigned JumpBufAlignment; + + /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be + /// if-converted. + unsigned IfCvtBlockSizeLimit; + + /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be + /// duplicated during if-conversion. + unsigned IfCvtDupBlockSizeLimit; + + /// PrefLoopAlignment - The preferred loop alignment. + /// + unsigned PrefLoopAlignment; + + /// StackPointerRegisterToSaveRestore - If set to a physical register, this + /// specifies the register that llvm.stacksave/llvm.stackrestore should save + /// and restore. + unsigned StackPointerRegisterToSaveRestore; + + /// ExceptionPointerRegister - If set to a physical register, this specifies + /// the register that receives the exception address on entry to a landing + /// pad. + unsigned ExceptionPointerRegister; + + /// ExceptionSelectorRegister - If set to a physical register, this specifies + /// the register that receives the exception typeid on entry to a landing + /// pad. + unsigned ExceptionSelectorRegister; + + /// RegClassForVT - This indicates the default register class to use for + /// each ValueType the target supports natively. 
+ TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; + unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE]; + MVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; + + /// TransformToType - For any value types we are promoting or expanding, this + /// contains the value type that we are changing to. For Expanded types, this + /// contains one step of the expand (e.g. i64 -> i32), even if there are + /// multiple steps required (e.g. i64 -> i16). For types natively supported + /// by the system, this holds the same type (e.g. i32 -> i32). + MVT TransformToType[MVT::LAST_VALUETYPE]; + + /// OpActions - For each operation and each value type, keep a LegalizeAction + /// that indicates how instruction selection should deal with the operation. + /// Most operations are Legal (aka, supported natively by the target), but + /// operations that are not should be described. Note that operations on + /// non-legal value types are not described here. + uint64_t OpActions[ISD::BUILTIN_OP_END]; + + /// LoadExtActions - For each load of load extension type and each value type, + /// keep a LegalizeAction that indicates how instruction selection should deal + /// with the load. + uint64_t LoadExtActions[ISD::LAST_LOADEXT_TYPE]; + + /// TruncStoreActions - For each truncating store, keep a LegalizeAction that + /// indicates how instruction selection should deal with the store. + uint64_t TruncStoreActions[MVT::LAST_VALUETYPE]; + + /// IndexedModeActions - For each indexed mode and each value type, keep a + /// pair of LegalizeAction that indicates how instruction selection should + /// deal with the load / store. + uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE]; + + /// ConvertActions - For each conversion from source type to destination type, + /// keep a LegalizeAction that indicates how instruction selection should + /// deal with the conversion. + /// Currently, this is used only for floating->floating conversions + /// (FP_EXTEND and FP_ROUND). 
+ uint64_t ConvertActions[MVT::LAST_VALUETYPE]; + + /// CondCodeActions - For each condition code (ISD::CondCode) keep a + /// LegalizeAction that indicates how instruction selection should + /// deal with the condition code. + uint64_t CondCodeActions[ISD::SETCC_INVALID]; + + ValueTypeActionImpl ValueTypeActions; + + std::vector<APFloat> LegalFPImmediates; + + std::vector<std::pair<MVT, TargetRegisterClass*> > AvailableRegClasses; + + /// TargetDAGCombineArray - Targets can specify ISD nodes that they would + /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(), + /// which sets a bit in this array. + unsigned char + TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT]; + + /// PromoteToType - For operations that must be promoted to a specific type, + /// this holds the destination type. This map should be sparse, so don't hold + /// it as an array. + /// + /// Targets add entries to this map with AddPromotedToType(..), clients access + /// this with getTypeToPromoteTo(..). + std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType> + PromoteToType; + + /// LibcallRoutineNames - Stores the name of each libcall. + /// + const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL]; + + /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result + /// of each of the comparison libcalls against zero. + ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL]; + +protected: + /// When lowering \@llvm.memset this field specifies the maximum number of + /// store operations that may be substituted for the call to memset. Targets + /// must set this value based on the cost threshold for that target. Targets + /// should assume that the memset will be done using as many of the largest + /// store operations first, followed by smaller ones, if necessary, per + /// alignment restrictions. 
For example, storing 9 bytes on a 32-bit machine + /// with 16-bit alignment would result in four 2-byte stores and one 1-byte + /// store. This only applies to setting a constant array of a constant size. + /// @brief Specify maximum number of store instructions per memset call. + unsigned maxStoresPerMemset; + + /// When lowering \@llvm.memcpy this field specifies the maximum number of + /// store operations that may be substituted for a call to memcpy. Targets + /// must set this value based on the cost threshold for that target. Targets + /// should assume that the memcpy will be done using as many of the largest + /// store operations first, followed by smaller ones, if necessary, per + /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine + /// with 32-bit alignment would result in one 4-byte store, one 2-byte store + /// and one 1-byte store. This only applies to copying a constant array of + /// constant size. + /// @brief Specify maximum bytes of store instructions per memcpy call. + unsigned maxStoresPerMemcpy; + + /// When lowering \@llvm.memmove this field specifies the maximum number of + /// store instructions that may be substituted for a call to memmove. Targets + /// must set this value based on the cost threshold for that target. Targets + /// should assume that the memmove will be done using as many of the largest + /// store operations first, followed by smaller ones, if necessary, per + /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine + /// with 8-bit alignment would result in nine 1-byte stores. This only + /// applies to copying a constant array of constant size. + /// @brief Specify maximum bytes of store instructions per memmove call. + unsigned maxStoresPerMemmove; + + /// This field specifies whether the target machine permits unaligned memory + /// accesses. This is used, for example, to determine the size of store + /// operations when copying small arrays and other similar tasks. 
+ /// @brief Indicate whether the target permits unaligned memory accesses. + bool allowUnalignedMemoryAccesses; + + /// This field specifies whether the target can benefit from code placement + /// optimization. + bool benefitFromCodePlacementOpt; +}; +} // end llvm namespace + +#endif diff --git a/include/llvm/Target/TargetMachOWriterInfo.h b/include/llvm/Target/TargetMachOWriterInfo.h new file mode 100644 index 0000000000000..f723bb5bee6a5 --- /dev/null +++ b/include/llvm/Target/TargetMachOWriterInfo.h @@ -0,0 +1,112 @@ +//===-- llvm/Target/TargetMachOWriterInfo.h - MachO Writer Info--*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TargetMachOWriterInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETMACHOWRITERINFO_H +#define LLVM_TARGET_TARGETMACHOWRITERINFO_H + +#include "llvm/CodeGen/MachineRelocation.h" + +namespace llvm { + + class MachineBasicBlock; + class OutputBuffer; + + //===--------------------------------------------------------------------===// + // TargetMachOWriterInfo + //===--------------------------------------------------------------------===// + + class TargetMachOWriterInfo { + uint32_t CPUType; // CPU specifier + uint32_t CPUSubType; // Machine specifier + public: + // The various CPU_TYPE_* constants are already defined by at least one + // system header file and create compilation errors if not respected. 
+#if !defined(CPU_TYPE_I386) +#define CPU_TYPE_I386 7 +#endif +#if !defined(CPU_TYPE_X86_64) +#define CPU_TYPE_X86_64 (CPU_TYPE_I386 | 0x1000000) +#endif +#if !defined(CPU_TYPE_ARM) +#define CPU_TYPE_ARM 12 +#endif +#if !defined(CPU_TYPE_SPARC) +#define CPU_TYPE_SPARC 14 +#endif +#if !defined(CPU_TYPE_POWERPC) +#define CPU_TYPE_POWERPC 18 +#endif +#if !defined(CPU_TYPE_POWERPC64) +#define CPU_TYPE_POWERPC64 (CPU_TYPE_POWERPC | 0x1000000) +#endif + + // Constants for the cputype field + // see <mach/machine.h> + enum { + HDR_CPU_TYPE_I386 = CPU_TYPE_I386, + HDR_CPU_TYPE_X86_64 = CPU_TYPE_X86_64, + HDR_CPU_TYPE_ARM = CPU_TYPE_ARM, + HDR_CPU_TYPE_SPARC = CPU_TYPE_SPARC, + HDR_CPU_TYPE_POWERPC = CPU_TYPE_POWERPC, + HDR_CPU_TYPE_POWERPC64 = CPU_TYPE_POWERPC64 + }; + +#if !defined(CPU_SUBTYPE_I386_ALL) +#define CPU_SUBTYPE_I386_ALL 3 +#endif +#if !defined(CPU_SUBTYPE_X86_64_ALL) +#define CPU_SUBTYPE_X86_64_ALL 3 +#endif +#if !defined(CPU_SUBTYPE_ARM_ALL) +#define CPU_SUBTYPE_ARM_ALL 0 +#endif +#if !defined(CPU_SUBTYPE_SPARC_ALL) +#define CPU_SUBTYPE_SPARC_ALL 0 +#endif +#if !defined(CPU_SUBTYPE_POWERPC_ALL) +#define CPU_SUBTYPE_POWERPC_ALL 0 +#endif + + // Constants for the cpusubtype field + // see <mach/machine.h> + enum { + HDR_CPU_SUBTYPE_I386_ALL = CPU_SUBTYPE_I386_ALL, + HDR_CPU_SUBTYPE_X86_64_ALL = CPU_SUBTYPE_X86_64_ALL, + HDR_CPU_SUBTYPE_ARM_ALL = CPU_SUBTYPE_ARM_ALL, + HDR_CPU_SUBTYPE_SPARC_ALL = CPU_SUBTYPE_SPARC_ALL, + HDR_CPU_SUBTYPE_POWERPC_ALL = CPU_SUBTYPE_POWERPC_ALL + }; + + TargetMachOWriterInfo(uint32_t cputype, uint32_t cpusubtype) + : CPUType(cputype), CPUSubType(cpusubtype) {} + virtual ~TargetMachOWriterInfo(); + + virtual MachineRelocation GetJTRelocation(unsigned Offset, + MachineBasicBlock *MBB) const; + + virtual unsigned GetTargetRelocation(MachineRelocation &MR, + unsigned FromIdx, + unsigned ToAddr, + unsigned ToIdx, + OutputBuffer &RelocOut, + OutputBuffer &SecOut, + bool Scattered, + bool Extern) const { return 0; } + + uint32_t 
getCPUType() const { return CPUType; } + uint32_t getCPUSubType() const { return CPUSubType; } + }; + +} // end llvm namespace + +#endif // LLVM_TARGET_TARGETMACHOWRITERINFO_H diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h new file mode 100644 index 0000000000000..bdcc4eff675fb --- /dev/null +++ b/include/llvm/Target/TargetMachine.h @@ -0,0 +1,432 @@ +//===-- llvm/Target/TargetMachine.h - Target Information --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TargetMachine and LLVMTargetMachine classes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETMACHINE_H +#define LLVM_TARGET_TARGETMACHINE_H + +#include "llvm/Target/TargetInstrItineraries.h" +#include <cassert> + +namespace llvm { + +class TargetAsmInfo; +class TargetData; +class TargetSubtarget; +class TargetInstrInfo; +class TargetIntrinsicInfo; +class TargetJITInfo; +class TargetLowering; +class TargetFrameInfo; +class MachineCodeEmitter; +class JITCodeEmitter; +class TargetRegisterInfo; +class Module; +class PassManagerBase; +class PassManager; +class Pass; +class TargetMachOWriterInfo; +class TargetELFWriterInfo; +class raw_ostream; + +// Relocation model types. +namespace Reloc { + enum Model { + Default, + Static, + PIC_, // Cannot be named PIC due to collision with -DPIC + DynamicNoPIC + }; +} + +// Code model types. +namespace CodeModel { + enum Model { + Default, + Small, + Kernel, + Medium, + Large + }; +} + +namespace FileModel { + enum Model { + Error, + None, + AsmFile, + MachOFile, + ElfFile + }; +} + +// Code generation optimization level. 
+namespace CodeGenOpt { + enum Level { + Default, + None, + Aggressive + }; +} + +//===----------------------------------------------------------------------===// +/// +/// TargetMachine - Primary interface to the complete machine description for +/// the target machine. All target-specific information should be accessible +/// through this interface. +/// +class TargetMachine { + TargetMachine(const TargetMachine &); // DO NOT IMPLEMENT + void operator=(const TargetMachine &); // DO NOT IMPLEMENT +protected: // Can only create subclasses. + TargetMachine() : AsmInfo(0) { } + + /// getSubtargetImpl - virtual method implemented by subclasses that returns + /// a reference to that target's TargetSubtarget-derived member variable. + virtual const TargetSubtarget *getSubtargetImpl() const { return 0; } + + /// AsmInfo - Contains target specific asm information. + /// + mutable const TargetAsmInfo *AsmInfo; + + /// createTargetAsmInfo - Create a new instance of target specific asm + /// information. + virtual const TargetAsmInfo *createTargetAsmInfo() const { return 0; } + +public: + virtual ~TargetMachine(); + + /// getModuleMatchQuality - This static method should be implemented by + /// targets to indicate how closely they match the specified module. This is + /// used by the LLC tool to determine which target to use when an explicit + /// -march option is not specified. If a target returns zero, it will never + /// be chosen without an explicit -march option. + static unsigned getModuleMatchQuality(const Module &) { return 0; } + + /// getJITMatchQuality - This static method should be implemented by targets + /// that provide JIT capabilities to indicate how suitable they are for + /// execution on the current host. If a value of 0 is returned, the target + /// will not be used unless an explicit -march option is used. 
+ static unsigned getJITMatchQuality() { return 0; } + + // Interfaces to the major aspects of target machine information: + // -- Instruction opcode and operand information + // -- Pipelines and scheduling information + // -- Stack frame information + // -- Selection DAG lowering information + // + virtual const TargetInstrInfo *getInstrInfo() const { return 0; } + virtual const TargetFrameInfo *getFrameInfo() const { return 0; } + virtual TargetLowering *getTargetLowering() const { return 0; } + virtual const TargetData *getTargetData() const { return 0; } + + /// getTargetAsmInfo - Return target specific asm information. + /// + const TargetAsmInfo *getTargetAsmInfo() const { + if (!AsmInfo) AsmInfo = createTargetAsmInfo(); + return AsmInfo; + } + + /// getSubtarget - This method returns a pointer to the specified type of + /// TargetSubtarget. In debug builds, it verifies that the object being + /// returned is of the correct type. + template<typename STC> const STC &getSubtarget() const { + const TargetSubtarget *TST = getSubtargetImpl(); + assert(TST && dynamic_cast<const STC*>(TST) && + "Not the right kind of subtarget!"); + return *static_cast<const STC*>(TST); + } + + /// getRegisterInfo - If register information is available, return it. If + /// not, return null. This is kept separate from RegInfo until RegInfo has + /// details of graph coloring register allocation removed from it. + /// + virtual const TargetRegisterInfo *getRegisterInfo() const { return 0; } + + /// getIntrinsicInfo - If intrinsic information is available, return it. If + /// not, return null. + /// + virtual const TargetIntrinsicInfo *getIntrinsicInfo() const { return 0; } + + /// getJITInfo - If this target supports a JIT, return information for it, + /// otherwise return null. + /// + virtual TargetJITInfo *getJITInfo() { return 0; } + + /// getInstrItineraryData - Returns instruction itinerary data for the target + /// or specific subtarget. 
+ /// + virtual const InstrItineraryData getInstrItineraryData() const { + return InstrItineraryData(); + } + + /// getMachOWriterInfo - If this target supports a Mach-O writer, return + /// information for it, otherwise return null. + /// + virtual const TargetMachOWriterInfo *getMachOWriterInfo() const { return 0; } + + /// getELFWriterInfo - If this target supports an ELF writer, return + /// information for it, otherwise return null. + /// + virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; } + + /// getRelocationModel - Returns the code generation relocation model. The + /// choices are static, PIC, and dynamic-no-pic, and target default. + static Reloc::Model getRelocationModel(); + + /// setRelocationModel - Sets the code generation relocation model. + /// + static void setRelocationModel(Reloc::Model Model); + + /// getCodeModel - Returns the code model. The choices are small, kernel, + /// medium, large, and target default. + static CodeModel::Model getCodeModel(); + + /// setCodeModel - Sets the code model. + /// + static void setCodeModel(CodeModel::Model Model); + + /// getAsmVerbosityDefault - Returns the default value of asm verbosity. + /// + static bool getAsmVerbosityDefault(); + + /// setAsmVerbosityDefault - Set the default value of asm verbosity. Default + /// is false. + static void setAsmVerbosityDefault(bool); + + /// CodeGenFileType - These enums are meant to be passed into + /// addPassesToEmitFile to indicate what type of file to emit. + enum CodeGenFileType { + AssemblyFile, ObjectFile, DynamicLibrary + }; + + /// getEnableTailMergeDefault - the default setting for -enable-tail-merge + /// on this target. User flag overrides. + virtual bool getEnableTailMergeDefault() const { return true; } + + /// addPassesToEmitFile - Add passes to the specified pass manager to get the + /// specified file emitted. Typically this will involve several steps of code + /// generation. 
If Fast is set to true, the code generator should emit code + /// as fast as possible, though the generated code may be less efficient. + /// This method should return FileModel::Error if emission of this file type + /// is not supported. + /// + virtual FileModel::Model addPassesToEmitFile(PassManagerBase &, + raw_ostream &, + CodeGenFileType, + CodeGenOpt::Level) { + return FileModel::None; + } + + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. + /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &, + MachineCodeEmitter *, + CodeGenOpt::Level) { + return true; + } + + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. + /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &, + JITCodeEmitter *, + CodeGenOpt::Level) { + return true; + } + + /// addPassesToEmitMachineCode - Add passes to the specified pass manager to + /// get machine code emitted. This uses a MachineCodeEmitter object to handle + /// actually outputting the machine code and resolving things like the address + /// of functions. This method returns true if machine code emission is + /// not supported. + /// + virtual bool addPassesToEmitMachineCode(PassManagerBase &, + MachineCodeEmitter &, + CodeGenOpt::Level) { + return true; + } + + /// addPassesToEmitMachineCode - Add passes to the specified pass manager to + /// get machine code emitted. This uses a MachineCodeEmitter object to handle + /// actually outputting the machine code and resolving things like the address + /// of functions. This method returns true if machine code emission is + /// not supported. 
+ /// + virtual bool addPassesToEmitMachineCode(PassManagerBase &, + JITCodeEmitter &, + CodeGenOpt::Level) { + return true; + } + + /// addPassesToEmitWholeFile - This method can be implemented by targets that + /// require having the entire module at once. This is not recommended, do not + /// use this. + virtual bool WantsWholeFile() const { return false; } + virtual bool addPassesToEmitWholeFile(PassManager &, raw_ostream &, + CodeGenFileType, + CodeGenOpt::Level) { + return true; + } +}; + +/// LLVMTargetMachine - This class describes a target machine that is +/// implemented with the LLVM target-independent code generator. +/// +class LLVMTargetMachine : public TargetMachine { +protected: // Can only create subclasses. + LLVMTargetMachine() { } + + /// addCommonCodeGenPasses - Add standard LLVM codegen passes used for + /// both emitting to assembly files or machine code output. + /// + bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level); + +public: + + /// addPassesToEmitFile - Add passes to the specified pass manager to get the + /// specified file emitted. Typically this will involve several steps of code + /// generation. If OptLevel is None, the code generator should emit code as fast + /// as possible, though the generated code may be less efficient. This method + /// should return FileModel::Error if emission of this file type is not + /// supported. + /// + /// The default implementation of this method adds components from the + /// LLVM retargetable code generator, invoking the methods below to get + /// target-specific passes in standard locations. + /// + virtual FileModel::Model addPassesToEmitFile(PassManagerBase &PM, + raw_ostream &Out, + CodeGenFileType FileType, + CodeGenOpt::Level); + + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. 
+ /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &PM, + MachineCodeEmitter *MCE, + CodeGenOpt::Level); + + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. + /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &PM, + JITCodeEmitter *MCE, + CodeGenOpt::Level); + + /// addPassesToEmitMachineCode - Add passes to the specified pass manager to + /// get machine code emitted. This uses a MachineCodeEmitter object to handle + /// actually outputting the machine code and resolving things like the address + /// of functions. This method returns true if machine code emission is + /// not supported. + /// + virtual bool addPassesToEmitMachineCode(PassManagerBase &PM, + MachineCodeEmitter &MCE, + CodeGenOpt::Level); + + /// addPassesToEmitMachineCode - Add passes to the specified pass manager to + /// get machine code emitted. This uses a MachineCodeEmitter object to handle + /// actually outputting the machine code and resolving things like the address + /// of functions. This method returns true if machine code emission is + /// not supported. + /// + virtual bool addPassesToEmitMachineCode(PassManagerBase &PM, + JITCodeEmitter &MCE, + CodeGenOpt::Level); + + /// Target-Independent Code Generator Pass Configuration Options. + + /// addInstSelector - This method should add any "last minute" LLVM->LLVM + /// passes, then install an instruction selector pass, which converts from + /// LLVM code to machine instructions. + virtual bool addInstSelector(PassManagerBase &, CodeGenOpt::Level) { + return true; + } + + /// addPreRegAllocPasses - This method may be implemented by targets that want + /// to run passes immediately before register allocation. This should return + /// true if -print-machineinstrs should print after these passes. 
+ virtual bool addPreRegAlloc(PassManagerBase &, CodeGenOpt::Level) { + return false; + } + + /// addPostRegAllocPasses - This method may be implemented by targets that + /// want to run passes after register allocation but before prolog-epilog + /// insertion. This should return true if -print-machineinstrs should print + /// after these passes. + virtual bool addPostRegAlloc(PassManagerBase &, CodeGenOpt::Level) { + return false; + } + + /// addPreEmitPass - This pass may be implemented by targets that want to run + /// passes immediately before machine code is emitted. This should return + /// true if -print-machineinstrs should print out the code after the passes. + virtual bool addPreEmitPass(PassManagerBase &, CodeGenOpt::Level) { + return false; + } + + + /// addAssemblyEmitter - This pass should be overridden by the target to add + /// the asmprinter, if asm emission is supported. If this is not supported, + /// 'true' should be returned. + virtual bool addAssemblyEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /* VerboseAsmDefault */, raw_ostream &) { + return true; + } + + /// addCodeEmitter - This pass should be overridden by the target to add a + /// code emitter, if supported. If this is not supported, 'true' should be + /// returned. If DumpAsm is true, the generated assembly is printed to cerr. + virtual bool addCodeEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /*DumpAsm*/, MachineCodeEmitter &) { + return true; + } + + /// addCodeEmitter - This pass should be overridden by the target to add a + /// code emitter, if supported. If this is not supported, 'true' should be + /// returned. If DumpAsm is true, the generated assembly is printed to cerr. + virtual bool addCodeEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /*DumpAsm*/, JITCodeEmitter &) { + return true; + } + + /// addSimpleCodeEmitter - This pass should be overridden by the target to add + /// a code emitter (without setting flags), if supported. 
If this is not + /// supported, 'true' should be returned. If DumpAsm is true, the generated + /// assembly is printed to cerr. + virtual bool addSimpleCodeEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /*DumpAsm*/, MachineCodeEmitter &) { + return true; + } + + /// addSimpleCodeEmitter - This pass should be overridden by the target to add + /// a code emitter (without setting flags), if supported. If this is not + /// supported, 'true' should be returned. If DumpAsm is true, the generated + /// assembly is printed to cerr. + virtual bool addSimpleCodeEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /*DumpAsm*/, JITCodeEmitter &) { + return true; + } + + /// getEnableTailMergeDefault - the default setting for -enable-tail-merge + /// on this target. User flag overrides. + virtual bool getEnableTailMergeDefault() const { return true; } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetMachineRegistry.h b/include/llvm/Target/TargetMachineRegistry.h new file mode 100644 index 0000000000000..b7ea448b20355 --- /dev/null +++ b/include/llvm/Target/TargetMachineRegistry.h @@ -0,0 +1,97 @@ +//===-- Target/TargetMachineRegistry.h - Target Registration ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file exposes two classes: the TargetMachineRegistry class, which allows +// tools to inspect all of registered targets, and the RegisterTarget class, +// which TargetMachine implementations should use to register themselves with +// the system. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETMACHINEREGISTRY_H +#define LLVM_TARGET_TARGETMACHINEREGISTRY_H + +#include "llvm/Module.h" +#include "llvm/Support/Registry.h" + +namespace llvm { + class Module; + class TargetMachine; + + struct TargetMachineRegistryEntry { + const char *Name; + const char *ShortDesc; + TargetMachine *(*CtorFn)(const Module &, const std::string &); + unsigned (*ModuleMatchQualityFn)(const Module &M); + unsigned (*JITMatchQualityFn)(); + + public: + TargetMachineRegistryEntry(const char *N, const char *SD, + TargetMachine *(*CF)(const Module &, const std::string &), + unsigned (*MMF)(const Module &M), + unsigned (*JMF)()) + : Name(N), ShortDesc(SD), CtorFn(CF), ModuleMatchQualityFn(MMF), + JITMatchQualityFn(JMF) {} + }; + + template<> + class RegistryTraits<TargetMachine> { + public: + typedef TargetMachineRegistryEntry entry; + + static const char *nameof(const entry &Entry) { return Entry.Name; } + static const char *descof(const entry &Entry) { return Entry.ShortDesc; } + }; + + struct TargetMachineRegistry : public Registry<TargetMachine> { + /// getClosestStaticTargetForModule - Given an LLVM module, pick the best + /// target that is compatible with the module. If no close target can be + /// found, this returns null and sets the Error string to a reason. + static const entry *getClosestStaticTargetForModule(const Module &M, + std::string &Error); + + /// getClosestTargetForJIT - Pick the best target that is compatible with + /// the current host. If no close target can be found, this returns null + /// and sets the Error string to a reason. + static const entry *getClosestTargetForJIT(std::string &Error); + + }; + + //===--------------------------------------------------------------------===// + /// RegisterTarget - This class is used to make targets automatically register + /// themselves with the tool they are linked. 
Targets should define an + /// instance of this and implement the static methods described in the + /// TargetMachine comments. + /// The type 'TargetMachineImpl' should provide a constructor with two + /// parameters: + /// - const Module& M: the module that is being compiled: + /// - const std::string& FS: target-specific string describing target + /// flavour. + + template<class TargetMachineImpl> + struct RegisterTarget { + RegisterTarget(const char *Name, const char *ShortDesc) + : Entry(Name, ShortDesc, &Allocator, + &TargetMachineImpl::getModuleMatchQuality, + &TargetMachineImpl::getJITMatchQuality), + Node(Entry) + {} + + private: + TargetMachineRegistry::entry Entry; + TargetMachineRegistry::node Node; + + static TargetMachine *Allocator(const Module &M, const std::string &FS) { + return new TargetMachineImpl(M, FS); + } + }; + +} + +#endif diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h new file mode 100644 index 0000000000000..06d7d79441ec4 --- /dev/null +++ b/include/llvm/Target/TargetOptions.h @@ -0,0 +1,126 @@ +//===-- llvm/Target/TargetOptions.h - Target Options ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines command line option flags that are shared across various +// targets. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETOPTIONS_H +#define LLVM_TARGET_TARGETOPTIONS_H + +namespace llvm { + /// PrintMachineCode - This flag is enabled when the -print-machineinstrs + /// option is specified on the command line, and should enable debugging + /// output from the code generator. 
+ extern bool PrintMachineCode; + + /// NoFramePointerElim - This flag is enabled when the -disable-fp-elim is + /// specified on the command line. If the target supports the frame pointer + /// elimination optimization, this option should disable it. + extern bool NoFramePointerElim; + + /// LessPreciseFPMAD - This flag is enabled when the + /// -enable-fp-mad is specified on the command line. When this flag is off + /// (the default), the code generator is not allowed to generate mad + /// (multiply add) if the result is "less precise" than doing those operations + /// individually. + extern bool LessPreciseFPMADOption; + extern bool LessPreciseFPMAD(); + + /// NoExcessFPPrecision - This flag is enabled when the + /// -disable-excess-fp-precision flag is specified on the command line. When + /// this flag is off (the default), the code generator is allowed to produce + /// results that are "more precise" than IEEE allows. This includes use of + /// FMA-like operations and use of the X86 FP registers without rounding all + /// over the place. + extern bool NoExcessFPPrecision; + + /// UnsafeFPMath - This flag is enabled when the + /// -enable-unsafe-fp-math flag is specified on the command line. When + /// this flag is off (the default), the code generator is not allowed to + /// produce results that are "less precise" than IEEE allows. This includes + /// use of X86 instructions like FSIN and FCOS instead of libcalls. + /// UnsafeFPMath implies FiniteOnlyFPMath and LessPreciseFPMAD. + extern bool UnsafeFPMath; + + /// FiniteOnlyFPMath - This returns true when the -enable-finite-only-fp-math + /// option is specified on the command line. If this returns false (default), + /// the code generator is not allowed to assume that FP arithmetic arguments + /// and results are never NaNs or +-Infs. 
+ extern bool FiniteOnlyFPMathOption; + extern bool FiniteOnlyFPMath(); + + /// HonorSignDependentRoundingFPMath - This returns true when the + /// -enable-sign-dependent-rounding-fp-math is specified. If this returns + /// false (the default), the code generator is allowed to assume that the + /// rounding behavior is the default (round-to-zero for all floating point to + /// integer conversions, and round-to-nearest for all other arithmetic + /// truncations). If this is enabled (set to true), the code generator must + /// assume that the rounding mode may dynamically change. + extern bool HonorSignDependentRoundingFPMathOption; + extern bool HonorSignDependentRoundingFPMath(); + + /// UseSoftFloat - This flag is enabled when the -soft-float flag is specified + /// on the command line. When this flag is on, the code generator will + /// generate libcalls to the software floating point library instead of + /// target FP instructions. + extern bool UseSoftFloat; + + /// NoImplicitFloat - This flag is enabled when the -no-implicit-float flag is + /// specified on the command line. When this flag is on, the code generator + /// won't generate any implicit floating point instructions. I.e., no XMM or + /// x87 or vectorized memcpy/memmove instructions. This is for X86 only. + extern bool NoImplicitFloat; + + /// NoZerosInBSS - By default some codegens place zero-initialized data to + /// .bss section. This flag disables such behaviour (necessary, e.g. for + /// crt*.o compiling). + extern bool NoZerosInBSS; + + /// ExceptionHandling - This flag indicates that exception information should + /// be emitted. + extern bool ExceptionHandling; + + /// UnwindTablesMandatory - This flag indicates that unwind tables should + /// be emitted for all functions. + extern bool UnwindTablesMandatory; + + /// PerformTailCallOpt - This flag is enabled when -tailcallopt is specified + /// on the commandline. 
When the flag is on, the target will perform tail call + /// optimization (pop the caller's stack) providing it supports it. + extern bool PerformTailCallOpt; + + /// StackAlignment - Override default stack alignment for target. + extern unsigned StackAlignment; + + /// RealignStack - This flag indicates, whether stack should be automatically + /// realigned, if needed. + extern bool RealignStack; + + /// DisableJumpTables - This flag indicates jump tables should not be + /// generated. + extern bool DisableJumpTables; + + /// EnableFastISel - This flag enables fast-path instruction selection + /// which trades away generated code quality in favor of reducing + /// compile time. + extern bool EnableFastISel; + + /// StrongPHIElim - This flag enables more aggressive PHI elimination + /// wth earlier copy coalescing. + extern bool StrongPHIElim; + + /// DisableRedZone - This flag disables use of the "Red Zone" on + /// targets which would otherwise have one. + extern bool DisableRedZone; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h new file mode 100644 index 0000000000000..0218bfdb2ae39 --- /dev/null +++ b/include/llvm/Target/TargetRegisterInfo.h @@ -0,0 +1,656 @@ +//=== Target/TargetRegisterInfo.h - Target Register Information -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes an abstract interface used to get information about a +// target machines register file. This information is used for a variety of +// purposed, especially register allocation. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETREGISTERINFO_H +#define LLVM_TARGET_TARGETREGISTERINFO_H + +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/ADT/DenseSet.h" +#include <cassert> +#include <functional> + +namespace llvm { + +class BitVector; +class MachineFunction; +class MachineMove; +class RegScavenger; + +/// TargetRegisterDesc - This record contains all of the information known about +/// a particular register. The AliasSet field (if not null) contains a pointer +/// to a Zero terminated array of registers that this register aliases. This is +/// needed for architectures like X86 which have AL alias AX alias EAX. +/// Registers that this does not apply to simply should set this to null. +/// The SubRegs field is a zero terminated array of registers that are +/// sub-registers of the specific register, e.g. AL, AH are sub-registers of AX. +/// The SuperRegs field is a zero terminated array of registers that are +/// super-registers of the specific register, e.g. RAX, EAX, are super-registers +/// of AX. 
+/// +struct TargetRegisterDesc { + const char *AsmName; // Assembly language name for the register + const char *Name; // Printable name for the reg (for debugging) + const unsigned *AliasSet; // Register Alias Set, described above + const unsigned *SubRegs; // Sub-register set, described above + const unsigned *SuperRegs; // Super-register set, described above +}; + +class TargetRegisterClass { +public: + typedef const unsigned* iterator; + typedef const unsigned* const_iterator; + + typedef const MVT* vt_iterator; + typedef const TargetRegisterClass* const * sc_iterator; +private: + unsigned ID; + const char *Name; + const vt_iterator VTs; + const sc_iterator SubClasses; + const sc_iterator SuperClasses; + const sc_iterator SubRegClasses; + const sc_iterator SuperRegClasses; + const unsigned RegSize, Alignment; // Size & Alignment of register in bytes + const int CopyCost; + const iterator RegsBegin, RegsEnd; + DenseSet<unsigned> RegSet; +public: + TargetRegisterClass(unsigned id, + const char *name, + const MVT *vts, + const TargetRegisterClass * const *subcs, + const TargetRegisterClass * const *supcs, + const TargetRegisterClass * const *subregcs, + const TargetRegisterClass * const *superregcs, + unsigned RS, unsigned Al, int CC, + iterator RB, iterator RE) + : ID(id), Name(name), VTs(vts), SubClasses(subcs), SuperClasses(supcs), + SubRegClasses(subregcs), SuperRegClasses(superregcs), + RegSize(RS), Alignment(Al), CopyCost(CC), RegsBegin(RB), RegsEnd(RE) { + for (iterator I = RegsBegin, E = RegsEnd; I != E; ++I) + RegSet.insert(*I); + } + virtual ~TargetRegisterClass() {} // Allow subclasses + + /// getID() - Return the register class ID number. + /// + unsigned getID() const { return ID; } + + /// getName() - Return the register class name for debugging. + /// + const char *getName() const { return Name; } + + /// begin/end - Return all of the registers in this class. 
+ /// + iterator begin() const { return RegsBegin; } + iterator end() const { return RegsEnd; } + + /// getNumRegs - Return the number of registers in this class. + /// + unsigned getNumRegs() const { return (unsigned)(RegsEnd-RegsBegin); } + + /// getRegister - Return the specified register in the class. + /// + unsigned getRegister(unsigned i) const { + assert(i < getNumRegs() && "Register number out of range!"); + return RegsBegin[i]; + } + + /// contains - Return true if the specified register is included in this + /// register class. + bool contains(unsigned Reg) const { + return RegSet.count(Reg); + } + + /// hasType - return true if this TargetRegisterClass has the ValueType vt. + /// + bool hasType(MVT vt) const { + for(int i = 0; VTs[i] != MVT::Other; ++i) + if (VTs[i] == vt) + return true; + return false; + } + + /// vt_begin / vt_end - Loop over all of the value types that can be + /// represented by values in this register class. + vt_iterator vt_begin() const { + return VTs; + } + + vt_iterator vt_end() const { + vt_iterator I = VTs; + while (*I != MVT::Other) ++I; + return I; + } + + /// subregclasses_begin / subregclasses_end - Loop over all of + /// the subreg register classes of this register class. + sc_iterator subregclasses_begin() const { + return SubRegClasses; + } + + sc_iterator subregclasses_end() const { + sc_iterator I = SubRegClasses; + while (*I != NULL) ++I; + return I; + } + + /// getSubRegisterRegClass - Return the register class of subregisters with + /// index SubIdx, or NULL if no such class exists. + const TargetRegisterClass* getSubRegisterRegClass(unsigned SubIdx) const { + assert(SubIdx>0 && "Invalid subregister index"); + for (unsigned s = 0; s != SubIdx-1; ++s) + if (!SubRegClasses[s]) + return NULL; + return SubRegClasses[SubIdx-1]; + } + + /// superregclasses_begin / superregclasses_end - Loop over all of + /// the superreg register classes of this register class. 
+ sc_iterator superregclasses_begin() const { + return SuperRegClasses; + } + + sc_iterator superregclasses_end() const { + sc_iterator I = SuperRegClasses; + while (*I != NULL) ++I; + return I; + } + + /// hasSubClass - return true if the the specified TargetRegisterClass + /// is a proper subset of this TargetRegisterClass. + bool hasSubClass(const TargetRegisterClass *cs) const { + for (int i = 0; SubClasses[i] != NULL; ++i) + if (SubClasses[i] == cs) + return true; + return false; + } + + /// subclasses_begin / subclasses_end - Loop over all of the classes + /// that are proper subsets of this register class. + sc_iterator subclasses_begin() const { + return SubClasses; + } + + sc_iterator subclasses_end() const { + sc_iterator I = SubClasses; + while (*I != NULL) ++I; + return I; + } + + /// hasSuperClass - return true if the specified TargetRegisterClass is a + /// proper superset of this TargetRegisterClass. + bool hasSuperClass(const TargetRegisterClass *cs) const { + for (int i = 0; SuperClasses[i] != NULL; ++i) + if (SuperClasses[i] == cs) + return true; + return false; + } + + /// superclasses_begin / superclasses_end - Loop over all of the classes + /// that are proper supersets of this register class. + sc_iterator superclasses_begin() const { + return SuperClasses; + } + + sc_iterator superclasses_end() const { + sc_iterator I = SuperClasses; + while (*I != NULL) ++I; + return I; + } + + /// isASubClass - return true if this TargetRegisterClass is a subset + /// class of at least one other TargetRegisterClass. + bool isASubClass() const { + return SuperClasses[0] != 0; + } + + /// allocation_order_begin/end - These methods define a range of registers + /// which specify the registers in this class that are valid to register + /// allocate, and the preferred order to allocate them in. For example, + /// callee saved registers should be at the end of the list, because it is + /// cheaper to allocate caller saved registers. 
+ /// + /// These methods take a MachineFunction argument, which can be used to tune + /// the allocatable registers based on the characteristics of the function. + /// One simple example is that the frame pointer register can be used if + /// frame-pointer-elimination is performed. + /// + /// By default, these methods return all registers in the class. + /// + virtual iterator allocation_order_begin(const MachineFunction &MF) const { + return begin(); + } + virtual iterator allocation_order_end(const MachineFunction &MF) const { + return end(); + } + + /// getSize - Return the size of the register in bytes, which is also the size + /// of a stack slot allocated to hold a spilled copy of this register. + unsigned getSize() const { return RegSize; } + + /// getAlignment - Return the minimum required alignment for a register of + /// this class. + unsigned getAlignment() const { return Alignment; } + + /// getCopyCost - Return the cost of copying a value between two registers in + /// this class. A negative number means the register class is very expensive + /// to copy e.g. status flag register classes. + int getCopyCost() const { return CopyCost; } +}; + + +/// TargetRegisterInfo base class - We assume that the target defines a static +/// array of TargetRegisterDesc objects that represent all of the machine +/// registers that the target has. As such, we simply have to track a pointer +/// to this array so that we can turn register number into a register +/// descriptor. 
+/// +class TargetRegisterInfo { +protected: + const unsigned* SubregHash; + const unsigned SubregHashSize; + const unsigned* SuperregHash; + const unsigned SuperregHashSize; + const unsigned* AliasesHash; + const unsigned AliasesHashSize; +public: + typedef const TargetRegisterClass * const * regclass_iterator; +private: + const TargetRegisterDesc *Desc; // Pointer to the descriptor array + unsigned NumRegs; // Number of entries in the array + + regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses + + int CallFrameSetupOpcode, CallFrameDestroyOpcode; +protected: + TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR, + regclass_iterator RegClassBegin, + regclass_iterator RegClassEnd, + int CallFrameSetupOpcode = -1, + int CallFrameDestroyOpcode = -1, + const unsigned* subregs = 0, + const unsigned subregsize = 0, + const unsigned* superregs = 0, + const unsigned superregsize = 0, + const unsigned* aliases = 0, + const unsigned aliasessize = 0); + virtual ~TargetRegisterInfo(); +public: + + enum { // Define some target independent constants + /// NoRegister - This physical register is not a real target register. It + /// is useful as a sentinal. + NoRegister = 0, + + /// FirstVirtualRegister - This is the first register number that is + /// considered to be a 'virtual' register, which is part of the SSA + /// namespace. This must be the same for all targets, which means that each + /// target is limited to 1024 registers. + FirstVirtualRegister = 1024 + }; + + /// isPhysicalRegister - Return true if the specified register number is in + /// the physical register namespace. + static bool isPhysicalRegister(unsigned Reg) { + assert(Reg && "this is not a register!"); + return Reg < FirstVirtualRegister; + } + + /// isVirtualRegister - Return true if the specified register number is in + /// the virtual register namespace. 
+ static bool isVirtualRegister(unsigned Reg) { + assert(Reg && "this is not a register!"); + return Reg >= FirstVirtualRegister; + } + + /// getPhysicalRegisterRegClass - Returns the Register Class of a physical + /// register of the given type. If type is MVT::Other, then just return any + /// register class the register belongs to. + virtual const TargetRegisterClass * + getPhysicalRegisterRegClass(unsigned Reg, MVT VT = MVT::Other) const; + + /// getAllocatableSet - Returns a bitset indexed by register number + /// indicating if a register is allocatable or not. If a register class is + /// specified, returns the subset for the class. + BitVector getAllocatableSet(MachineFunction &MF, + const TargetRegisterClass *RC = NULL) const; + + const TargetRegisterDesc &operator[](unsigned RegNo) const { + assert(RegNo < NumRegs && + "Attempting to access record for invalid register number!"); + return Desc[RegNo]; + } + + /// Provide a get method, equivalent to [], but more useful if we have a + /// pointer to this object. + /// + const TargetRegisterDesc &get(unsigned RegNo) const { + return operator[](RegNo); + } + + /// getAliasSet - Return the set of registers aliased by the specified + /// register, or a null list of there are none. The list returned is zero + /// terminated. + /// + const unsigned *getAliasSet(unsigned RegNo) const { + return get(RegNo).AliasSet; + } + + /// getSubRegisters - Return the list of registers that are sub-registers of + /// the specified register, or a null list of there are none. The list + /// returned is zero terminated and sorted according to super-sub register + /// relations. e.g. X86::RAX's sub-register list is EAX, AX, AL, AH. + /// + const unsigned *getSubRegisters(unsigned RegNo) const { + return get(RegNo).SubRegs; + } + + /// getSuperRegisters - Return the list of registers that are super-registers + /// of the specified register, or a null list of there are none. 
The list + /// returned is zero terminated and sorted according to super-sub register + /// relations. e.g. X86::AL's super-register list is RAX, EAX, AX. + /// + const unsigned *getSuperRegisters(unsigned RegNo) const { + return get(RegNo).SuperRegs; + } + + /// getAsmName - Return the symbolic target-specific name for the + /// specified physical register. + const char *getAsmName(unsigned RegNo) const { + return get(RegNo).AsmName; + } + + /// getName - Return the human-readable symbolic target-specific name for the + /// specified physical register. + const char *getName(unsigned RegNo) const { + return get(RegNo).Name; + } + + /// getNumRegs - Return the number of registers this target has (useful for + /// sizing arrays holding per register information) + unsigned getNumRegs() const { + return NumRegs; + } + + /// areAliases - Returns true if the two registers alias each other, false + /// otherwise + bool areAliases(unsigned regA, unsigned regB) const { + size_t index = (regA + regB * 37) & (AliasesHashSize-1); + unsigned ProbeAmt = 0; + while (AliasesHash[index*2] != 0 && + AliasesHash[index*2+1] != 0) { + if (AliasesHash[index*2] == regA && AliasesHash[index*2+1] == regB) + return true; + + index = (index + ProbeAmt) & (AliasesHashSize-1); + ProbeAmt += 2; + } + + return false; + } + + /// regsOverlap - Returns true if the two registers are equal or alias each + /// other. The registers may be virtual register. + bool regsOverlap(unsigned regA, unsigned regB) const { + if (regA == regB) + return true; + + if (isVirtualRegister(regA) || isVirtualRegister(regB)) + return false; + return areAliases(regA, regB); + } + + /// isSubRegister - Returns true if regB is a sub-register of regA. + /// + bool isSubRegister(unsigned regA, unsigned regB) const { + // SubregHash is a simple quadratically probed hash table. 
+ size_t index = (regA + regB * 37) & (SubregHashSize-1); + unsigned ProbeAmt = 2; + while (SubregHash[index*2] != 0 && + SubregHash[index*2+1] != 0) { + if (SubregHash[index*2] == regA && SubregHash[index*2+1] == regB) + return true; + + index = (index + ProbeAmt) & (SubregHashSize-1); + ProbeAmt += 2; + } + + return false; + } + + /// isSuperRegister - Returns true if regB is a super-register of regA. + /// + bool isSuperRegister(unsigned regA, unsigned regB) const { + // SuperregHash is a simple quadratically probed hash table. + size_t index = (regA + regB * 37) & (SuperregHashSize-1); + unsigned ProbeAmt = 2; + while (SuperregHash[index*2] != 0 && + SuperregHash[index*2+1] != 0) { + if (SuperregHash[index*2] == regA && SuperregHash[index*2+1] == regB) + return true; + + index = (index + ProbeAmt) & (SuperregHashSize-1); + ProbeAmt += 2; + } + + return false; + } + + /// getCalleeSavedRegs - Return a null-terminated list of all of the + /// callee saved registers on this target. The register should be in the + /// order of desired callee-save stack frame offset. The first register is + /// closed to the incoming stack pointer if stack grows down, and vice versa. + virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF = 0) + const = 0; + + /// getCalleeSavedRegClasses - Return a null-terminated list of the preferred + /// register classes to spill each callee saved register with. The order and + /// length of this list match the getCalleeSaveRegs() list. + virtual const TargetRegisterClass* const *getCalleeSavedRegClasses( + const MachineFunction *MF) const =0; + + /// getReservedRegs - Returns a bitset indexed by physical register number + /// indicating if a register is a special register that has particular uses + /// and should be considered unavailable at all times, e.g. SP, RA. This is + /// used by register scavenger to determine what registers are free. 
+ virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0; + + /// getSubReg - Returns the physical register number of sub-register "Index" + /// for physical register RegNo. Return zero if the sub-register does not + /// exist. + virtual unsigned getSubReg(unsigned RegNo, unsigned Index) const = 0; + + /// getMatchingSuperReg - Return a super-register of the specified register + /// Reg so its sub-register of index SubIdx is Reg. + unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, + const TargetRegisterClass *RC) const { + for (const unsigned *SRs = getSuperRegisters(Reg); unsigned SR = *SRs;++SRs) + if (Reg == getSubReg(SR, SubIdx) && RC->contains(SR)) + return SR; + return 0; + } + + //===--------------------------------------------------------------------===// + // Register Class Information + // + + /// Register class iterators + /// + regclass_iterator regclass_begin() const { return RegClassBegin; } + regclass_iterator regclass_end() const { return RegClassEnd; } + + unsigned getNumRegClasses() const { + return (unsigned)(regclass_end()-regclass_begin()); + } + + /// getRegClass - Returns the register class associated with the enumeration + /// value. See class TargetOperandInfo. + const TargetRegisterClass *getRegClass(unsigned i) const { + assert(i <= getNumRegClasses() && "Register Class ID out of range"); + return i ? RegClassBegin[i - 1] : NULL; + } + + /// getPointerRegClass - Returns a TargetRegisterClass used for pointer + /// values. + virtual const TargetRegisterClass *getPointerRegClass() const { + assert(0 && "Target didn't implement getPointerRegClass!"); + return 0; // Must return a value in order to compile with VS 2005 + } + + /// getCrossCopyRegClass - Returns a legal register class to copy a register + /// in the specified class to or from. Returns NULL if it is possible to copy + /// between a two registers of the specified class. 
+ virtual const TargetRegisterClass * + getCrossCopyRegClass(const TargetRegisterClass *RC) const { + return NULL; + } + + /// targetHandlesStackFrameRounding - Returns true if the target is + /// responsible for rounding up the stack frame (probably at emitPrologue + /// time). + virtual bool targetHandlesStackFrameRounding() const { + return false; + } + + /// requiresRegisterScavenging - returns true if the target requires (and can + /// make use of) the register scavenger. + virtual bool requiresRegisterScavenging(const MachineFunction &MF) const { + return false; + } + + /// hasFP - Return true if the specified function should have a dedicated + /// frame pointer register. For most targets this is true only if the function + /// has variable sized allocas or if frame pointer elimination is disabled. + virtual bool hasFP(const MachineFunction &MF) const = 0; + + // hasReservedCallFrame - Under normal circumstances, when a frame pointer is + // not required, we reserve argument space for call sites in the function + // immediately on entry to the current function. This eliminates the need for + // add/sub sp brackets around call sites. Returns true if the call frame is + // included as part of the stack frame. + virtual bool hasReservedCallFrame(MachineFunction &MF) const { + return !hasFP(MF); + } + + // needsStackRealignment - true if storage within the function requires the + // stack pointer to be aligned more than the normal calling convention calls + // for. + virtual bool needsStackRealignment(const MachineFunction &MF) const { + return false; + } + + /// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the + /// frame setup/destroy instructions if they exist (-1 otherwise). Some + /// targets use pseudo instructions in order to abstract away the difference + /// between operating with a frame pointer and operating without, through the + /// use of these two instructions. 
+ /// + int getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; } + int getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; } + + /// eliminateCallFramePseudoInstr - This method is called during prolog/epilog + /// code insertion to eliminate call frame setup and destroy pseudo + /// instructions (but only if the Target is using them). It is responsible + /// for eliminating these instructions, replacing them with concrete + /// instructions. This method need only be implemented if using call frame + /// setup/destroy pseudo instructions. + /// + virtual void + eliminateCallFramePseudoInstr(MachineFunction &MF, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI) const { + assert(getCallFrameSetupOpcode()== -1 && getCallFrameDestroyOpcode()== -1 && + "eliminateCallFramePseudoInstr must be implemented if using" + " call frame setup/destroy pseudo instructions!"); + assert(0 && "Call Frame Pseudo Instructions do not exist on this target!"); + } + + /// processFunctionBeforeCalleeSavedScan - This method is called immediately + /// before PrologEpilogInserter scans the physical registers used to determine + /// what callee saved registers should be spilled. This method is optional. + virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, + RegScavenger *RS = NULL) const { + + } + + /// processFunctionBeforeFrameFinalized - This method is called immediately + /// before the specified functions frame layout (MF.getFrameInfo()) is + /// finalized. Once the frame is finalized, MO_FrameIndex operands are + /// replaced with direct constants. This method is optional. + /// + virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF) const { + } + + /// eliminateFrameIndex - This method must be overriden to eliminate abstract + /// frame indices from instructions which may use them. The instruction + /// referenced by the iterator contains an MO_FrameIndex operand which must be + /// eliminated by this method. 
This method may modify or replace the + /// specified instruction, as long as it keeps the iterator pointing the the + /// finished product. SPAdj is the SP adjustment due to call frame setup + /// instruction. + virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI, + int SPAdj, RegScavenger *RS=NULL) const = 0; + + /// emitProlog/emitEpilog - These methods insert prolog and epilog code into + /// the function. + virtual void emitPrologue(MachineFunction &MF) const = 0; + virtual void emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const = 0; + + //===--------------------------------------------------------------------===// + /// Debug information queries. + + /// getDwarfRegNum - Map a target register to an equivalent dwarf register + /// number. Returns -1 if there is no equivalent value. The second + /// parameter allows targets to use different numberings for EH info and + /// debugging info. + virtual int getDwarfRegNum(unsigned RegNum, bool isEH) const = 0; + + /// getFrameRegister - This method should return the register used as a base + /// for values allocated in the current stack frame. + virtual unsigned getFrameRegister(MachineFunction &MF) const = 0; + + /// getFrameIndexOffset - Returns the displacement from the frame register to + /// the stack frame of the specified index. + virtual int getFrameIndexOffset(MachineFunction &MF, int FI) const; + + /// getRARegister - This method should return the register where the return + /// address can be found. + virtual unsigned getRARegister() const = 0; + + /// getInitialFrameState - Returns a list of machine moves that are assumed + /// on entry to all functions. Note that LabelID is ignored (assumed to be + /// the beginning of the function.) 
+ virtual void getInitialFrameState(std::vector<MachineMove> &Moves) const; +}; + + +// This is useful when building IndexedMaps keyed on virtual registers +struct VirtReg2IndexFunctor : std::unary_function<unsigned, unsigned> { + unsigned operator()(unsigned Reg) const { + return Reg - TargetRegisterInfo::FirstVirtualRegister; + } +}; + +/// getCommonSubClass - find the largest common subclass of A and B. Return NULL +/// if there is no common subclass. +const TargetRegisterClass *getCommonSubClass(const TargetRegisterClass *A, + const TargetRegisterClass *B); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetSchedule.td b/include/llvm/Target/TargetSchedule.td new file mode 100644 index 0000000000000..38461c5a380ed --- /dev/null +++ b/include/llvm/Target/TargetSchedule.td @@ -0,0 +1,72 @@ +//===- TargetSchedule.td - Target Independent Scheduling ---*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the target-independent scheduling interfaces which should +// be implemented by each target which is using TableGen based scheduling. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Processor functional unit - These values represent the function units +// available across all chip sets for the target. Eg., IntUnit, FPUnit, ... +// These may be independent values for each chip set or may be shared across +// all chip sets of the target. Each functional unit is treated as a resource +// during scheduling and has an affect instruction order based on availability +// during a time interval. 
+// +class FuncUnit; + +//===----------------------------------------------------------------------===// +// Instruction stage - These values represent a step in the execution of an +// instruction. The latency represents the number of discrete time slots used +// need to complete the stage. Units represent the choice of functional units +// that can be used to complete the stage. Eg. IntUnit1, IntUnit2. +// +class InstrStage<int cycles, list<FuncUnit> units> { + int Cycles = cycles; // length of stage in machine cycles + list<FuncUnit> Units = units; // choice of functional units +} + +//===----------------------------------------------------------------------===// +// Instruction itinerary - An itinerary represents a sequential series of steps +// required to complete an instruction. Itineraries are represented as lists of +// instruction stages. +// + +//===----------------------------------------------------------------------===// +// Instruction itinerary classes - These values represent 'named' instruction +// itinerary. Using named itineraries simplifies managing groups of +// instructions across chip sets. An instruction uses the same itinerary class +// across all chip sets. Thus a new chip set can be added without modifying +// instruction information. +// +class InstrItinClass; +def NoItinerary : InstrItinClass; + +//===----------------------------------------------------------------------===// +// Instruction itinerary data - These values provide a runtime map of an +// instruction itinerary class (name) to it's itinerary data. +// +class InstrItinData<InstrItinClass Class, list<InstrStage> stages> { + InstrItinClass TheClass = Class; + list<InstrStage> Stages = stages; +} + +//===----------------------------------------------------------------------===// +// Processor itineraries - These values represent the set of all itinerary +// classes for a given chip set. 
+// +class ProcessorItineraries<list<InstrItinData> iid> { + list<InstrItinData> IID = iid; +} + +// NoItineraries - A marker that can be used by processors without schedule +// info. +def NoItineraries : ProcessorItineraries<[]>; + diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td new file mode 100644 index 0000000000000..2cd29676dbfd8 --- /dev/null +++ b/include/llvm/Target/TargetSelectionDAG.td @@ -0,0 +1,864 @@ +//===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the target-independent interfaces used by SelectionDAG +// instruction selection generators. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Selection DAG Type Constraint definitions. +// +// Note that the semantics of these constraints are hard coded into tblgen. To +// modify or add constraints, you have to hack tblgen. +// + +class SDTypeConstraint<int opnum> { + int OperandNum = opnum; +} + +// SDTCisVT - The specified operand has exactly this VT. +class SDTCisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> { + ValueType VT = vt; +} + +class SDTCisPtrTy<int OpNum> : SDTypeConstraint<OpNum>; + +// SDTCisInt - The specified operand is has integer type. +class SDTCisInt<int OpNum> : SDTypeConstraint<OpNum>; + +// SDTCisFP - The specified operand is has floating point type. +class SDTCisFP<int OpNum> : SDTypeConstraint<OpNum>; + +// SDTCisSameAs - The two specified operands have identical types. 
+class SDTCisSameAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> { + int OtherOperandNum = OtherOp; +} + +// SDTCisVTSmallerThanOp - The specified operand is a VT SDNode, and its type is +// smaller than the 'Other' operand. +class SDTCisVTSmallerThanOp<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> { + int OtherOperandNum = OtherOp; +} + +class SDTCisOpSmallerThanOp<int SmallOp, int BigOp> : SDTypeConstraint<SmallOp>{ + int BigOperandNum = BigOp; +} + +/// SDTCisEltOfVec - This indicates that ThisOp is a scalar type of the same +/// type as the element type of OtherOp, which is a vector type. +class SDTCisEltOfVec<int ThisOp, int OtherOp> + : SDTypeConstraint<ThisOp> { + int OtherOpNum = OtherOp; +} + +//===----------------------------------------------------------------------===// +// Selection DAG Type Profile definitions. +// +// These use the constraints defined above to describe the type requirements of +// the various nodes. These are not hard coded into tblgen, allowing targets to +// add their own if needed. +// + +// SDTypeProfile - This profile describes the type requirements of a Selection +// DAG node. +class SDTypeProfile<int numresults, int numoperands, + list<SDTypeConstraint> constraints> { + int NumResults = numresults; + int NumOperands = numoperands; + list<SDTypeConstraint> Constraints = constraints; +} + +// Builtin profiles. +def SDTIntLeaf: SDTypeProfile<1, 0, [SDTCisInt<0>]>; // for 'imm'. +def SDTFPLeaf : SDTypeProfile<1, 0, [SDTCisFP<0>]>; // for 'fpimm'. +def SDTPtrLeaf: SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>; // for '&g'. +def SDTOther : SDTypeProfile<1, 0, [SDTCisVT<0, OtherVT>]>; // for 'vt'. +def SDTUNDEF : SDTypeProfile<1, 0, []>; // for 'undef'. +def SDTUnaryOp : SDTypeProfile<1, 1, []>; // for bitconvert. + +def SDTIntBinOp : SDTypeProfile<1, 2, [ // add, and, or, xor, udiv, etc. 
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0> +]>; +def SDTIntShiftOp : SDTypeProfile<1, 2, [ // shl, sra, srl + SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2> +]>; +def SDTFPBinOp : SDTypeProfile<1, 2, [ // fadd, fmul, etc. + SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0> +]>; +def SDTFPSignOp : SDTypeProfile<1, 2, [ // fcopysign. + SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisFP<2> +]>; +def SDTFPTernaryOp : SDTypeProfile<1, 3, [ // fmadd, fnmsub, etc. + SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisFP<0> +]>; +def SDTIntUnaryOp : SDTypeProfile<1, 1, [ // ctlz + SDTCisSameAs<0, 1>, SDTCisInt<0> +]>; +def SDTIntExtendOp : SDTypeProfile<1, 1, [ // sext, zext, anyext + SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0> +]>; +def SDTIntTruncOp : SDTypeProfile<1, 1, [ // trunc + SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<0, 1> +]>; +def SDTFPUnaryOp : SDTypeProfile<1, 1, [ // fneg, fsqrt, etc + SDTCisSameAs<0, 1>, SDTCisFP<0> +]>; +def SDTFPRoundOp : SDTypeProfile<1, 1, [ // fround + SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1> +]>; +def SDTFPExtendOp : SDTypeProfile<1, 1, [ // fextend + SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0> +]>; +def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp + SDTCisFP<0>, SDTCisInt<1> +]>; +def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int + SDTCisInt<0>, SDTCisFP<1> +]>; +def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg + SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisVT<2, OtherVT>, + SDTCisVTSmallerThanOp<2, 1> +]>; + +def SDTSetCC : SDTypeProfile<1, 3, [ // setcc + SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT> +]>; + +def SDTSelect : SDTypeProfile<1, 3, [ // select + SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3> +]>; + +def SDTSelectCC : SDTypeProfile<1, 5, [ // select_cc + SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisSameAs<0, 3>, + SDTCisVT<5, OtherVT> +]>; + +def SDTBr : SDTypeProfile<0, 1, [ // br + SDTCisVT<0, OtherVT> +]>; + +def SDTBrcond : 
SDTypeProfile<0, 2, [ // brcond + SDTCisInt<0>, SDTCisVT<1, OtherVT> +]>; + +def SDTBrind : SDTypeProfile<0, 1, [ // brind + SDTCisPtrTy<0> +]>; + +def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap + +def SDTLoad : SDTypeProfile<1, 1, [ // load + SDTCisPtrTy<1> +]>; + +def SDTStore : SDTypeProfile<0, 2, [ // store + SDTCisPtrTy<1> +]>; + +def SDTIStore : SDTypeProfile<1, 3, [ // indexed store + SDTCisSameAs<0, 2>, SDTCisPtrTy<0>, SDTCisPtrTy<3> +]>; + +def SDTVecShuffle : SDTypeProfile<1, 2, [ + SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2> +]>; +def SDTVecExtract : SDTypeProfile<1, 2, [ // vector extract + SDTCisEltOfVec<0, 1>, SDTCisPtrTy<2> +]>; +def SDTVecInsert : SDTypeProfile<1, 3, [ // vector insert + SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3> +]>; + +def STDPrefetch : SDTypeProfile<0, 3, [ // prefetch + SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisInt<1> +]>; + +def STDMemBarrier : SDTypeProfile<0, 5, [ // memory barier + SDTCisSameAs<0,1>, SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisSameAs<0,4>, + SDTCisInt<0> +]>; +def STDAtomic3 : SDTypeProfile<1, 3, [ + SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisInt<0>, SDTCisPtrTy<1> +]>; +def STDAtomic2 : SDTypeProfile<1, 2, [ + SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1> +]>; + +def SDTConvertOp : SDTypeProfile<1, 5, [ //cvtss, su, us, uu, ff, fs, fu, sf, su + SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>, SDTCisPtrTy<4>, SDTCisPtrTy<5> +]>; + +class SDCallSeqStart<list<SDTypeConstraint> constraints> : + SDTypeProfile<0, 1, constraints>; +class SDCallSeqEnd<list<SDTypeConstraint> constraints> : + SDTypeProfile<0, 2, constraints>; + +//===----------------------------------------------------------------------===// +// Selection DAG Node Properties. +// +// Note: These are hard coded into tblgen. 
+// +class SDNodeProperty; +def SDNPCommutative : SDNodeProperty; // X op Y == Y op X +def SDNPAssociative : SDNodeProperty; // (X op Y) op Z == X op (Y op Z) +def SDNPHasChain : SDNodeProperty; // R/W chain operand and result +def SDNPOutFlag : SDNodeProperty; // Write a flag result +def SDNPInFlag : SDNodeProperty; // Read a flag operand +def SDNPOptInFlag : SDNodeProperty; // Optionally read a flag operand +def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'. +def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'. +def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'. +def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand + +//===----------------------------------------------------------------------===// +// Selection DAG Node definitions. +// +class SDNode<string opcode, SDTypeProfile typeprof, + list<SDNodeProperty> props = [], string sdclass = "SDNode"> { + string Opcode = opcode; + string SDClass = sdclass; + list<SDNodeProperty> Properties = props; + SDTypeProfile TypeProfile = typeprof; +} + +def set; +def implicit; +def parallel; +def node; +def srcvalue; + +def imm : SDNode<"ISD::Constant" , SDTIntLeaf , [], "ConstantSDNode">; +def timm : SDNode<"ISD::TargetConstant",SDTIntLeaf, [], "ConstantSDNode">; +def fpimm : SDNode<"ISD::ConstantFP", SDTFPLeaf , [], "ConstantFPSDNode">; +def vt : SDNode<"ISD::VALUETYPE" , SDTOther , [], "VTSDNode">; +def bb : SDNode<"ISD::BasicBlock", SDTOther , [], "BasicBlockSDNode">; +def cond : SDNode<"ISD::CONDCODE" , SDTOther , [], "CondCodeSDNode">; +def undef : SDNode<"ISD::UNDEF" , SDTUNDEF , []>; +def globaladdr : SDNode<"ISD::GlobalAddress", SDTPtrLeaf, [], + "GlobalAddressSDNode">; +def tglobaladdr : SDNode<"ISD::TargetGlobalAddress", SDTPtrLeaf, [], + "GlobalAddressSDNode">; +def globaltlsaddr : SDNode<"ISD::GlobalTLSAddress", SDTPtrLeaf, [], + "GlobalAddressSDNode">; +def tglobaltlsaddr : SDNode<"ISD::TargetGlobalTLSAddress", SDTPtrLeaf, 
[], + "GlobalAddressSDNode">; +def constpool : SDNode<"ISD::ConstantPool", SDTPtrLeaf, [], + "ConstantPoolSDNode">; +def tconstpool : SDNode<"ISD::TargetConstantPool", SDTPtrLeaf, [], + "ConstantPoolSDNode">; +def jumptable : SDNode<"ISD::JumpTable", SDTPtrLeaf, [], + "JumpTableSDNode">; +def tjumptable : SDNode<"ISD::TargetJumpTable", SDTPtrLeaf, [], + "JumpTableSDNode">; +def frameindex : SDNode<"ISD::FrameIndex", SDTPtrLeaf, [], + "FrameIndexSDNode">; +def tframeindex : SDNode<"ISD::TargetFrameIndex", SDTPtrLeaf, [], + "FrameIndexSDNode">; +def externalsym : SDNode<"ISD::ExternalSymbol", SDTPtrLeaf, [], + "ExternalSymbolSDNode">; +def texternalsym: SDNode<"ISD::TargetExternalSymbol", SDTPtrLeaf, [], + "ExternalSymbolSDNode">; + +def add : SDNode<"ISD::ADD" , SDTIntBinOp , + [SDNPCommutative, SDNPAssociative]>; +def sub : SDNode<"ISD::SUB" , SDTIntBinOp>; +def mul : SDNode<"ISD::MUL" , SDTIntBinOp, + [SDNPCommutative, SDNPAssociative]>; +def mulhs : SDNode<"ISD::MULHS" , SDTIntBinOp, [SDNPCommutative]>; +def mulhu : SDNode<"ISD::MULHU" , SDTIntBinOp, [SDNPCommutative]>; +def sdiv : SDNode<"ISD::SDIV" , SDTIntBinOp>; +def udiv : SDNode<"ISD::UDIV" , SDTIntBinOp>; +def srem : SDNode<"ISD::SREM" , SDTIntBinOp>; +def urem : SDNode<"ISD::UREM" , SDTIntBinOp>; +def srl : SDNode<"ISD::SRL" , SDTIntShiftOp>; +def sra : SDNode<"ISD::SRA" , SDTIntShiftOp>; +def shl : SDNode<"ISD::SHL" , SDTIntShiftOp>; +def rotl : SDNode<"ISD::ROTL" , SDTIntShiftOp>; +def rotr : SDNode<"ISD::ROTR" , SDTIntShiftOp>; +def and : SDNode<"ISD::AND" , SDTIntBinOp, + [SDNPCommutative, SDNPAssociative]>; +def or : SDNode<"ISD::OR" , SDTIntBinOp, + [SDNPCommutative, SDNPAssociative]>; +def xor : SDNode<"ISD::XOR" , SDTIntBinOp, + [SDNPCommutative, SDNPAssociative]>; +def addc : SDNode<"ISD::ADDC" , SDTIntBinOp, + [SDNPCommutative, SDNPOutFlag]>; +def adde : SDNode<"ISD::ADDE" , SDTIntBinOp, + [SDNPCommutative, SDNPOutFlag, SDNPInFlag]>; +def subc : SDNode<"ISD::SUBC" , SDTIntBinOp, + 
[SDNPOutFlag]>; +def sube : SDNode<"ISD::SUBE" , SDTIntBinOp, + [SDNPOutFlag, SDNPInFlag]>; + +def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>; +def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>; +def ctlz : SDNode<"ISD::CTLZ" , SDTIntUnaryOp>; +def cttz : SDNode<"ISD::CTTZ" , SDTIntUnaryOp>; +def ctpop : SDNode<"ISD::CTPOP" , SDTIntUnaryOp>; +def sext : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>; +def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>; +def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>; +def trunc : SDNode<"ISD::TRUNCATE" , SDTIntTruncOp>; +def bitconvert : SDNode<"ISD::BIT_CONVERT", SDTUnaryOp>; +def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>; +def insertelt : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>; + + +def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>; +def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>; +def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>; +def fdiv : SDNode<"ISD::FDIV" , SDTFPBinOp>; +def frem : SDNode<"ISD::FREM" , SDTFPBinOp>; +def fabs : SDNode<"ISD::FABS" , SDTFPUnaryOp>; +def fneg : SDNode<"ISD::FNEG" , SDTFPUnaryOp>; +def fsqrt : SDNode<"ISD::FSQRT" , SDTFPUnaryOp>; +def fsin : SDNode<"ISD::FSIN" , SDTFPUnaryOp>; +def fcos : SDNode<"ISD::FCOS" , SDTFPUnaryOp>; +def frint : SDNode<"ISD::FRINT" , SDTFPUnaryOp>; +def ftrunc : SDNode<"ISD::FTRUNC" , SDTFPUnaryOp>; +def fceil : SDNode<"ISD::FCEIL" , SDTFPUnaryOp>; +def ffloor : SDNode<"ISD::FFLOOR" , SDTFPUnaryOp>; +def fnearbyint : SDNode<"ISD::FNEARBYINT" , SDTFPUnaryOp>; + +def fround : SDNode<"ISD::FP_ROUND" , SDTFPRoundOp>; +def fextend : SDNode<"ISD::FP_EXTEND" , SDTFPExtendOp>; +def fcopysign : SDNode<"ISD::FCOPYSIGN" , SDTFPSignOp>; + +def sint_to_fp : SDNode<"ISD::SINT_TO_FP" , SDTIntToFPOp>; +def uint_to_fp : SDNode<"ISD::UINT_TO_FP" , SDTIntToFPOp>; +def fp_to_sint : SDNode<"ISD::FP_TO_SINT" , SDTFPToIntOp>; +def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>; + +def setcc : SDNode<"ISD::SETCC" 
, SDTSetCC>; +def select : SDNode<"ISD::SELECT" , SDTSelect>; +def selectcc : SDNode<"ISD::SELECT_CC" , SDTSelectCC>; +def vsetcc : SDNode<"ISD::VSETCC" , SDTSetCC>; + +def brcond : SDNode<"ISD::BRCOND" , SDTBrcond, [SDNPHasChain]>; +def brind : SDNode<"ISD::BRIND" , SDTBrind, [SDNPHasChain]>; +def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>; +def ret : SDNode<"ISD::RET" , SDTNone, [SDNPHasChain]>; +def trap : SDNode<"ISD::TRAP" , SDTNone, + [SDNPHasChain, SDNPSideEffect]>; + +def prefetch : SDNode<"ISD::PREFETCH" , STDPrefetch, + [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>; + +def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier, + [SDNPHasChain, SDNPSideEffect]>; + +def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umin : 
SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; + +// Do not use ld, st directly. Use load, extload, sextload, zextload, store, +// and truncst (see below). +def ld : SDNode<"ISD::LOAD" , SDTLoad, + [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; +def st : SDNode<"ISD::STORE" , SDTStore, + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; +def ist : SDNode<"ISD::STORE" , SDTIStore, + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; + +def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>; +def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, -1, []>, []>; +def scalar_to_vector : SDNode<"ISD::SCALAR_TO_VECTOR", SDTypeProfile<1, 1, []>, + []>; +def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT", + SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>; +def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT", + SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>; + +// Nodes for intrinsics, you should use the intrinsic itself and let tblgen use +// these internally. Don't reference these directly. +def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID", + SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>, + [SDNPHasChain]>; +def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN", + SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, + [SDNPHasChain]>; +def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN", + SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>; + +// Do not use cvt directly. 
Use cvt forms below +def cvt : SDNode<"ISD::CONVERT_RNDSAT", SDTConvertOp>; + +//===----------------------------------------------------------------------===// +// Selection DAG Condition Codes + +class CondCode; // ISD::CondCode enums +def SETOEQ : CondCode; def SETOGT : CondCode; +def SETOGE : CondCode; def SETOLT : CondCode; def SETOLE : CondCode; +def SETONE : CondCode; def SETO : CondCode; def SETUO : CondCode; +def SETUEQ : CondCode; def SETUGT : CondCode; def SETUGE : CondCode; +def SETULT : CondCode; def SETULE : CondCode; def SETUNE : CondCode; + +def SETEQ : CondCode; def SETGT : CondCode; def SETGE : CondCode; +def SETLT : CondCode; def SETLE : CondCode; def SETNE : CondCode; + + +//===----------------------------------------------------------------------===// +// Selection DAG Node Transformation Functions. +// +// This mechanism allows targets to manipulate nodes in the output DAG once a +// match has been formed. This is typically used to manipulate immediate +// values. +// +class SDNodeXForm<SDNode opc, code xformFunction> { + SDNode Opcode = opc; + code XFormFunction = xformFunction; +} + +def NOOP_SDNodeXForm : SDNodeXForm<imm, [{}]>; + + +//===----------------------------------------------------------------------===// +// Selection DAG Pattern Fragments. +// +// Pattern fragments are reusable chunks of dags that match specific things. +// They can take arguments and have C++ predicates that control whether they +// match. They are intended to make the patterns for common instructions more +// compact and readable. +// + +/// PatFrag - Represents a pattern fragment. This can match something on the +/// DAG, frame a single node to multiply nested other fragments. +/// +class PatFrag<dag ops, dag frag, code pred = [{}], + SDNodeXForm xform = NOOP_SDNodeXForm> { + dag Operands = ops; + dag Fragment = frag; + code Predicate = pred; + SDNodeXForm OperandTransform = xform; +} + +// PatLeaf's are pattern fragments that have no operands. 
This is just a helper +// to define immediates and other common things concisely. +class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm> + : PatFrag<(ops), frag, pred, xform>; + +// Leaf fragments. + +def vtInt : PatLeaf<(vt), [{ return N->getVT().isInteger(); }]>; +def vtFP : PatLeaf<(vt), [{ return N->getVT().isFloatingPoint(); }]>; + +def immAllOnes : PatLeaf<(imm), [{ return N->isAllOnesValue(); }]>; +def immAllOnesV: PatLeaf<(build_vector), [{ + return ISD::isBuildVectorAllOnes(N); +}]>; +def immAllOnesV_bc: PatLeaf<(bitconvert), [{ + return ISD::isBuildVectorAllOnes(N); +}]>; +def immAllZerosV: PatLeaf<(build_vector), [{ + return ISD::isBuildVectorAllZeros(N); +}]>; +def immAllZerosV_bc: PatLeaf<(bitconvert), [{ + return ISD::isBuildVectorAllZeros(N); +}]>; + + + +// Other helper fragments. +def not : PatFrag<(ops node:$in), (xor node:$in, immAllOnes)>; +def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>; +def vnot_conv : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV_bc)>; +def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>; + +// load fragments. +def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{ + return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; +}]>; +def load : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD; +}]>; + +// extending load fragments. 
+def extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD; +}]>; +def sextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD; +}]>; +def zextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD; +}]>; + +def extloadi1 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def extloadi8 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def extloadi16 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def extloadi32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def extloadf32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32; +}]>; +def extloadf64 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64; +}]>; + +def sextloadi1 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def sextloadi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def sextloadi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def sextloadi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; + +def zextloadi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def zextloadi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; 
+def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; + +// store fragments. +def unindexedstore : PatFrag<(ops node:$val, node:$ptr), + (st node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; +}]>; +def store : PatFrag<(ops node:$val, node:$ptr), + (unindexedstore node:$val, node:$ptr), [{ + return !cast<StoreSDNode>(N)->isTruncatingStore(); +}]>; + +// truncstore fragments. +def truncstore : PatFrag<(ops node:$val, node:$ptr), + (unindexedstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->isTruncatingStore(); +}]>; +def truncstorei8 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def truncstorei16 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def truncstorei32 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def truncstoref32 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32; +}]>; +def truncstoref64 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64; +}]>; + +// indexed store fragments. 
+def istore : PatFrag<(ops node:$val, node:$base, node:$offset), + (ist node:$val, node:$base, node:$offset), [{ + return !cast<StoreSDNode>(N)->isTruncatingStore(); +}]>; + +def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset), + (istore node:$val, node:$base, node:$offset), [{ + ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode(); + return AM == ISD::PRE_INC || AM == ISD::PRE_DEC; +}]>; + +def itruncstore : PatFrag<(ops node:$val, node:$base, node:$offset), + (ist node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->isTruncatingStore(); +}]>; +def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset), + (itruncstore node:$val, node:$base, node:$offset), [{ + ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode(); + return AM == ISD::PRE_INC || AM == ISD::PRE_DEC; +}]>; +def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32; +}]>; + +def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset), + (istore node:$val, node:$ptr, node:$offset), [{ + ISD::MemIndexedMode AM = 
cast<StoreSDNode>(N)->getAddressingMode(); + return AM == ISD::POST_INC || AM == ISD::POST_DEC; +}]>; + +def post_truncst : PatFrag<(ops node:$val, node:$base, node:$offset), + (itruncstore node:$val, node:$base, node:$offset), [{ + ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode(); + return AM == ISD::POST_INC || AM == ISD::POST_DEC; +}]>; +def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32; +}]>; + +// setcc convenience fragments. 
+def setoeq : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOEQ)>; +def setogt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOGT)>; +def setoge : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOGE)>; +def setolt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOLT)>; +def setole : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOLE)>; +def setone : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETONE)>; +def seto : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETO)>; +def setuo : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUO)>; +def setueq : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUEQ)>; +def setugt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUGT)>; +def setuge : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUGE)>; +def setult : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETULT)>; +def setule : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETULE)>; +def setune : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUNE)>; +def seteq : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETEQ)>; +def setgt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETGT)>; +def setge : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETGE)>; +def setlt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETLT)>; +def setle : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETLE)>; +def setne : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETNE)>; + +def atomic_cmp_swap_8 : + PatFrag<(ops node:$ptr, node:$cmp, node:$swap), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; 
+def atomic_cmp_swap_16 : + PatFrag<(ops node:$ptr, node:$cmp, node:$swap), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def atomic_cmp_swap_32 : + PatFrag<(ops node:$ptr, node:$cmp, node:$swap), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def atomic_cmp_swap_64 : + PatFrag<(ops node:$ptr, node:$cmp, node:$swap), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64; +}]>; + +multiclass binary_atomic_op<SDNode atomic_op> { + def _8 : PatFrag<(ops node:$ptr, node:$val), + (atomic_op node:$ptr, node:$val), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8; + }]>; + def _16 : PatFrag<(ops node:$ptr, node:$val), + (atomic_op node:$ptr, node:$val), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16; + }]>; + def _32 : PatFrag<(ops node:$ptr, node:$val), + (atomic_op node:$ptr, node:$val), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32; + }]>; + def _64 : PatFrag<(ops node:$ptr, node:$val), + (atomic_op node:$ptr, node:$val), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64; + }]>; +} + +defm atomic_load_add : binary_atomic_op<atomic_load_add>; +defm atomic_swap : binary_atomic_op<atomic_swap>; +defm atomic_load_sub : binary_atomic_op<atomic_load_sub>; +defm atomic_load_and : binary_atomic_op<atomic_load_and>; +defm atomic_load_or : binary_atomic_op<atomic_load_or>; +defm atomic_load_xor : binary_atomic_op<atomic_load_xor>; +defm atomic_load_nand : binary_atomic_op<atomic_load_nand>; +defm atomic_load_min : binary_atomic_op<atomic_load_min>; +defm atomic_load_max : binary_atomic_op<atomic_load_max>; +defm atomic_load_umin : binary_atomic_op<atomic_load_umin>; +defm atomic_load_umax : binary_atomic_op<atomic_load_umax>; + +//===----------------------------------------------------------------------===// +// 
Selection DAG CONVERT_RNDSAT patterns + +def cvtff : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FF; + }]>; + +def cvtss : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SS; + }]>; + +def cvtsu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SU; + }]>; + +def cvtus : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_US; + }]>; + +def cvtuu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UU; + }]>; + +def cvtsf : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SF; + }]>; + +def cvtuf : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UF; + }]>; + +def cvtfs : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FS; + }]>; + +def cvtfu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == 
ISD::CVT_FU; + }]>; + +//===----------------------------------------------------------------------===// +// Selection DAG Pattern Support. +// +// Patterns are what are actually matched against the target-flavored +// instruction selection DAG. Instructions defined by the target implicitly +// define patterns in most cases, but patterns can also be explicitly added when +// an operation is defined by a sequence of instructions (e.g. loading a large +// immediate value on RISC targets that do not support immediates as large as +// their GPRs). +// + +class Pattern<dag patternToMatch, list<dag> resultInstrs> { + dag PatternToMatch = patternToMatch; + list<dag> ResultInstrs = resultInstrs; + list<Predicate> Predicates = []; // See class Instruction in Target.td. + int AddedComplexity = 0; // See class Instruction in Target.td. +} + +// Pat - A simple (but common) form of a pattern, which produces a simple result +// not needing a full list. +class Pat<dag pattern, dag result> : Pattern<pattern, [result]>; + +//===----------------------------------------------------------------------===// +// Complex pattern definitions. +// + +class CPAttribute; +// Pass the parent Operand as root to CP function rather +// than the root of the sub-DAG +def CPAttrParentAsRoot : CPAttribute; + +// Complex patterns, e.g. X86 addressing mode, requires pattern matching code +// in C++. NumOperands is the number of operands returned by the select function; +// SelectFunc is the name of the function used to pattern match the max. pattern; +// RootNodes are the list of possible root nodes of the sub-dags to match. +// e.g. 
X86 addressing mode - def addr : ComplexPattern<4, "SelectAddr", [add]>; +// +class ComplexPattern<ValueType ty, int numops, string fn, + list<SDNode> roots = [], list<SDNodeProperty> props = [], + list<CPAttribute> attrs = []> { + ValueType Ty = ty; + int NumOperands = numops; + string SelectFunc = fn; + list<SDNode> RootNodes = roots; + list<SDNodeProperty> Properties = props; + list<CPAttribute> Attributes = attrs; +} + +//===----------------------------------------------------------------------===// +// Dwarf support. +// +def SDT_dwarf_loc : SDTypeProfile<0, 3, + [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>; +def dwarf_loc : SDNode<"ISD::DEBUG_LOC", SDT_dwarf_loc,[SDNPHasChain]>; diff --git a/include/llvm/Target/TargetSubtarget.h b/include/llvm/Target/TargetSubtarget.h new file mode 100644 index 0000000000000..eca45eb0d7459 --- /dev/null +++ b/include/llvm/Target/TargetSubtarget.h @@ -0,0 +1,42 @@ +//==-- llvm/Target/TargetSubtarget.h - Target Information --------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the subtarget options of a Target machine. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETSUBTARGET_H +#define LLVM_TARGET_TARGETSUBTARGET_H + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// +/// TargetSubtarget - Generic base class for all target subtargets. All +/// Target-specific options that control code generation and printing should +/// be exposed through a TargetSubtarget-derived class. +/// +class TargetSubtarget { + TargetSubtarget(const TargetSubtarget&); // DO NOT IMPLEMENT + void operator=(const TargetSubtarget&); // DO NOT IMPLEMENT +protected: // Can only create subclasses... 
+ TargetSubtarget(); +public: + virtual ~TargetSubtarget(); + + /// getSpecialAddressLatency - For targets where it is beneficial to + /// backschedule instructions that compute addresses, return a value + /// indicating the number of scheduling cycles of backscheduling that + /// should be attempted. + virtual unsigned getSpecialAddressLatency() const { return 0; } +}; + +} // End llvm namespace + +#endif |