Diffstat (limited to 'lib/Target/X86/X86Subtarget.h')
-rw-r--r--  lib/Target/X86/X86Subtarget.h  |  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 754b5b924717..e0263d66e928 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -162,9 +162,9 @@ protected:
/// True if unaligned 32-byte memory accesses are slow.
bool IsUAMem32Slow;
- /// HasVectorUAMem - True if SIMD operations can have unaligned memory
- /// operands. This may require setting a feature bit in the processor.
- bool HasVectorUAMem;
+ /// True if SSE operations can have unaligned memory operands.
+ /// This may require setting a configuration bit in the processor.
+ bool HasSSEUnalignedMem;
/// HasCmpxchg16b - True if this processor has the CMPXCHG16B instruction;
/// this is true for most x86-64 chips, but not the first AMD chips.
@@ -378,7 +378,7 @@ public:
bool isSHLDSlow() const { return IsSHLDSlow; }
bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
- bool hasVectorUAMem() const { return HasVectorUAMem; }
+ bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
bool useLeaForSP() const { return UseLeaForSP; }
bool hasSlowDivide32() const { return HasSlowDivide32; }
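
For context, here is a minimal standalone sketch of how a predicate like hasSSEUnalignedMem() might gate folding an unaligned load into an SSE instruction's memory operand. This is not LLVM's actual lowering code; X86SubtargetModel, canFoldUnalignedSSELoad, and the chosen alignment values are hypothetical and only illustrate the intent of the renamed flag.

#include <cstdio>

// Hypothetical stand-in for the relevant bit of X86Subtarget.
struct X86SubtargetModel {
  bool HasSSEUnalignedMem = false;  // assumed true below purely for illustration
  bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
};

// Decide whether a load with the given alignment (in bytes) may be folded
// directly into an SSE instruction's memory operand.
static bool canFoldUnalignedSSELoad(const X86SubtargetModel &ST,
                                    unsigned AlignBytes, unsigned VecBytes) {
  if (AlignBytes >= VecBytes)
    return true;                     // naturally aligned: always foldable
  return ST.hasSSEUnalignedMem();    // otherwise only if the CPU tolerates it
}

int main() {
  X86SubtargetModel ST;
  ST.HasSSEUnalignedMem = true;
  std::printf("fold 4-byte-aligned 16-byte load: %d\n",
              canFoldUnalignedSSELoad(ST, 4, 16));
  return 0;
}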