diff options
Diffstat (limited to 'llvm/include/llvm/ADT')
56 files changed, 3076 insertions, 917 deletions
diff --git a/llvm/include/llvm/ADT/APFloat.h b/llvm/include/llvm/ADT/APFloat.h index ed25b2cd89f1..876e52c150a0 100644 --- a/llvm/include/llvm/ADT/APFloat.h +++ b/llvm/include/llvm/ADT/APFloat.h @@ -18,6 +18,7 @@ #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/FloatingPointMode.h" #include "llvm/Support/ErrorHandling.h" #include <memory> @@ -141,7 +142,7 @@ enum lostFraction { // Example of truncated bits: // members. struct APFloatBase { typedef APInt::WordType integerPart; - static const unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD; + static constexpr unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD; /// A signed type to represent a floating point numbers unbiased exponent. typedef int32_t ExponentType; @@ -150,6 +151,7 @@ struct APFloatBase { /// @{ enum Semantics { S_IEEEhalf, + S_BFloat, S_IEEEsingle, S_IEEEdouble, S_x87DoubleExtended, @@ -161,6 +163,7 @@ struct APFloatBase { static Semantics SemanticsToEnum(const llvm::fltSemantics &Sem); static const fltSemantics &IEEEhalf() LLVM_READNONE; + static const fltSemantics &BFloat() LLVM_READNONE; static const fltSemantics &IEEEsingle() LLVM_READNONE; static const fltSemantics &IEEEdouble() LLVM_READNONE; static const fltSemantics &IEEEquad() LLVM_READNONE; @@ -182,13 +185,15 @@ struct APFloatBase { }; /// IEEE-754R 4.3: Rounding-direction attributes. 
- enum roundingMode { - rmNearestTiesToEven, - rmTowardPositive, - rmTowardNegative, - rmTowardZero, - rmNearestTiesToAway - }; + using roundingMode = llvm::RoundingMode; + + static constexpr roundingMode rmNearestTiesToEven = + RoundingMode::NearestTiesToEven; + static constexpr roundingMode rmTowardPositive = RoundingMode::TowardPositive; + static constexpr roundingMode rmTowardNegative = RoundingMode::TowardNegative; + static constexpr roundingMode rmTowardZero = RoundingMode::TowardZero; + static constexpr roundingMode rmNearestTiesToAway = + RoundingMode::NearestTiesToAway; /// IEEE-754R 7: Default exception handling. /// @@ -511,6 +516,7 @@ private: opStatus divideSpecials(const IEEEFloat &); opStatus multiplySpecials(const IEEEFloat &); opStatus modSpecials(const IEEEFloat &); + opStatus remainderSpecials(const IEEEFloat&); /// @} @@ -537,6 +543,7 @@ private: /// @} APInt convertHalfAPFloatToAPInt() const; + APInt convertBFloatAPFloatToAPInt() const; APInt convertFloatAPFloatToAPInt() const; APInt convertDoubleAPFloatToAPInt() const; APInt convertQuadrupleAPFloatToAPInt() const; @@ -544,6 +551,7 @@ private: APInt convertPPCDoubleDoubleAPFloatToAPInt() const; void initFromAPInt(const fltSemantics *Sem, const APInt &api); void initFromHalfAPInt(const APInt &api); + void initFromBFloatAPInt(const APInt &api); void initFromFloatAPInt(const APInt &api); void initFromDoubleAPInt(const APInt &api); void initFromQuadrupleAPInt(const APInt &api); @@ -585,7 +593,7 @@ IEEEFloat scalbn(IEEEFloat X, int Exp, IEEEFloat::roundingMode); IEEEFloat frexp(const IEEEFloat &Val, int &Exp, IEEEFloat::roundingMode RM); // This mode implements more precise float in terms of two APFloats. -// The interface and layout is designed for arbitray underlying semantics, +// The interface and layout is designed for arbitrary underlying semantics, // though currently only PPCDoubleDouble semantics are supported, whose // corresponding underlying semantics are IEEEdouble. 
class DoubleAPFloat final : public APFloatBase { @@ -853,8 +861,8 @@ public: APFloat(const fltSemantics &Semantics) : U(Semantics) {} APFloat(const fltSemantics &Semantics, StringRef S); APFloat(const fltSemantics &Semantics, integerPart I) : U(Semantics, I) {} - template <typename T, typename = typename std::enable_if< - std::is_floating_point<T>::value>::type> + template <typename T, + typename = std::enable_if_t<std::is_floating_point<T>::value>> APFloat(const fltSemantics &Semantics, T V) = delete; // TODO: Remove this constructor. This isn't faster than the first one. APFloat(const fltSemantics &Semantics, uninitializedTag) @@ -950,9 +958,10 @@ public: /// Returns a float which is bitcasted from an all one value int. /// + /// \param Semantics - type float semantics /// \param BitWidth - Select float type - /// \param isIEEE - If 128 bit number, select between PPC and IEEE - static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false); + static APFloat getAllOnesValue(const fltSemantics &Semantics, + unsigned BitWidth); /// Used to insert APFloat objects, or objects that contain APFloat objects, /// into FoldingSets. @@ -1035,6 +1044,13 @@ public: APFLOAT_DISPATCH_ON_SEMANTICS(next(nextDown)); } + /// Negate an APFloat. + APFloat operator-() const { + APFloat Result(*this); + Result.changeSign(); + return Result; + } + /// Add two APFloats, rounding ties to the nearest even. /// No error checking. 
APFloat operator+(const APFloat &RHS) const { @@ -1117,7 +1133,27 @@ public: double convertToDouble() const { return getIEEE().convertToDouble(); } float convertToFloat() const { return getIEEE().convertToFloat(); } - bool operator==(const APFloat &) const = delete; + bool operator==(const APFloat &RHS) const { return compare(RHS) == cmpEqual; } + + bool operator!=(const APFloat &RHS) const { return compare(RHS) != cmpEqual; } + + bool operator<(const APFloat &RHS) const { + return compare(RHS) == cmpLessThan; + } + + bool operator>(const APFloat &RHS) const { + return compare(RHS) == cmpGreaterThan; + } + + bool operator<=(const APFloat &RHS) const { + cmpResult Res = compare(RHS); + return Res == cmpLessThan || Res == cmpEqual; + } + + bool operator>=(const APFloat &RHS) const { + cmpResult Res = compare(RHS); + return Res == cmpGreaterThan || Res == cmpEqual; + } cmpResult compare(const APFloat &RHS) const { assert(&getSemantics() == &RHS.getSemantics() && @@ -1249,7 +1285,7 @@ inline APFloat minnum(const APFloat &A, const APFloat &B) { return B; if (B.isNaN()) return A; - return (B.compare(A) == APFloat::cmpLessThan) ? B : A; + return B < A ? B : A; } /// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if @@ -1260,7 +1296,7 @@ inline APFloat maxnum(const APFloat &A, const APFloat &B) { return B; if (B.isNaN()) return A; - return (A.compare(B) == APFloat::cmpLessThan) ? B : A; + return A < B ? B : A; } /// Implements IEEE 754-2018 minimum semantics. Returns the smaller of 2 @@ -1273,7 +1309,7 @@ inline APFloat minimum(const APFloat &A, const APFloat &B) { return B; if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative())) return A.isNegative() ? A : B; - return (B.compare(A) == APFloat::cmpLessThan) ? B : A; + return B < A ? B : A; } /// Implements IEEE 754-2018 maximum semantics. 
Returns the larger of 2 @@ -1286,7 +1322,7 @@ inline APFloat maximum(const APFloat &A, const APFloat &B) { return B; if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative())) return A.isNegative() ? B : A; - return (A.compare(B) == APFloat::cmpLessThan) ? B : A; + return A < B ? B : A; } } // namespace llvm diff --git a/llvm/include/llvm/ADT/APInt.h b/llvm/include/llvm/ADT/APInt.h index 0791a6d686a3..f7df648d27ed 100644 --- a/llvm/include/llvm/ADT/APInt.h +++ b/llvm/include/llvm/ADT/APInt.h @@ -84,7 +84,7 @@ public: UP, }; - static const WordType WORDTYPE_MAX = ~WordType(0); + static constexpr WordType WORDTYPE_MAX = ~WordType(0); private: /// This union is used to store the integer value. When the @@ -616,9 +616,11 @@ public: } /// Wrap version of getBitsSet. - /// If \p hiBit is no less than \p loBit, this is same with getBitsSet. - /// If \p hiBit is less than \p loBit, the set bits "wrap". For example, with - /// parameters (32, 28, 4), you would get 0xF000000F. + /// If \p hiBit is bigger than \p loBit, this is same with getBitsSet. + /// If \p hiBit is not bigger than \p loBit, the set bits "wrap". For example, + /// with parameters (32, 28, 4), you would get 0xF000000F. + /// If \p hiBit is equal to \p loBit, you would get a result with all bits + /// set. static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit, unsigned hiBit) { APInt Res(numBits, 0); @@ -1448,12 +1450,13 @@ public: } /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1. - /// This function handles "wrap" case when \p loBit > \p hiBit, and calls - /// setBits when \p loBit <= \p hiBit. + /// This function handles "wrap" case when \p loBit >= \p hiBit, and calls + /// setBits when \p loBit < \p hiBit. + /// For \p loBit == \p hiBit wrap case, set every bit to 1. 
void setBitsWithWrap(unsigned loBit, unsigned hiBit) { assert(hiBit <= BitWidth && "hiBit out of range"); assert(loBit <= BitWidth && "loBit out of range"); - if (loBit <= hiBit) { + if (loBit < hiBit) { setBits(loBit, hiBit); return; } @@ -2283,7 +2286,7 @@ void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes); /// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting /// from Src into IntVal, which is assumed to be wide enough and to hold zero. -void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes); +void LoadIntFromMemory(APInt &IntVal, const uint8_t *Src, unsigned LoadBytes); } // namespace llvm diff --git a/llvm/include/llvm/ADT/AllocatorList.h b/llvm/include/llvm/ADT/AllocatorList.h index 405a2e4264df..447d7a7538db 100644 --- a/llvm/include/llvm/ADT/AllocatorList.h +++ b/llvm/include/llvm/ADT/AllocatorList.h @@ -110,8 +110,8 @@ private: template <class OtherValueT, class OtherIteratorBase> IteratorImpl(const IteratorImpl<OtherValueT, OtherIteratorBase> &X, - typename std::enable_if<std::is_convertible< - OtherIteratorBase, IteratorBase>::value>::type * = nullptr) + std::enable_if_t<std::is_convertible< + OtherIteratorBase, IteratorBase>::value> * = nullptr) : base_type(X.wrapped()) {} ~IteratorImpl() = default; diff --git a/llvm/include/llvm/ADT/Any.h b/llvm/include/llvm/ADT/Any.h index 49657e02a991..0aded628cda4 100644 --- a/llvm/include/llvm/ADT/Any.h +++ b/llvm/include/llvm/ADT/Any.h @@ -59,26 +59,26 @@ public: // When T is Any or T is not copy-constructible we need to explicitly disable // the forwarding constructor so that the copy constructor gets selected // instead. 
- template < - typename T, - typename std::enable_if< - llvm::conjunction< - llvm::negation<std::is_same<typename std::decay<T>::type, Any>>, - // We also disable this overload when an `Any` object can be - // converted to the parameter type because in that case, this - // constructor may combine with that conversion during overload - // resolution for determining copy constructibility, and then - // when we try to determine copy constructibility below we may - // infinitely recurse. This is being evaluated by the standards - // committee as a potential DR in `std::any` as well, but we're - // going ahead and adopting it to work-around usage of `Any` with - // types that need to be implicitly convertible from an `Any`. - llvm::negation<std::is_convertible<Any, typename std::decay<T>::type>>, - std::is_copy_constructible<typename std::decay<T>::type>>::value, - int>::type = 0> + template <typename T, + std::enable_if_t< + llvm::conjunction< + llvm::negation<std::is_same<std::decay_t<T>, Any>>, + // We also disable this overload when an `Any` object can be + // converted to the parameter type because in that case, + // this constructor may combine with that conversion during + // overload resolution for determining copy + // constructibility, and then when we try to determine copy + // constructibility below we may infinitely recurse. This is + // being evaluated by the standards committee as a potential + // DR in `std::any` as well, but we're going ahead and + // adopting it to work-around usage of `Any` with types that + // need to be implicitly convertible from an `Any`. 
+ llvm::negation<std::is_convertible<Any, std::decay_t<T>>>, + std::is_copy_constructible<std::decay_t<T>>>::value, + int> = 0> Any(T &&Value) { - using U = typename std::decay<T>::type; - Storage = std::make_unique<StorageImpl<U>>(std::forward<T>(Value)); + Storage = + std::make_unique<StorageImpl<std::decay_t<T>>>(std::forward<T>(Value)); } Any(Any &&Other) : Storage(std::move(Other.Storage)) {} @@ -114,32 +114,27 @@ template <typename T> const char Any::TypeId<T>::Id = 0; template <typename T> bool any_isa(const Any &Value) { if (!Value.Storage) return false; - using U = - typename std::remove_cv<typename std::remove_reference<T>::type>::type; - return Value.Storage->id() == &Any::TypeId<U>::Id; + return Value.Storage->id() == + &Any::TypeId<std::remove_cv_t<std::remove_reference_t<T>>>::Id; } template <class T> T any_cast(const Any &Value) { - using U = - typename std::remove_cv<typename std::remove_reference<T>::type>::type; - return static_cast<T>(*any_cast<U>(&Value)); + return static_cast<T>( + *any_cast<std::remove_cv_t<std::remove_reference_t<T>>>(&Value)); } template <class T> T any_cast(Any &Value) { - using U = - typename std::remove_cv<typename std::remove_reference<T>::type>::type; - return static_cast<T>(*any_cast<U>(&Value)); + return static_cast<T>( + *any_cast<std::remove_cv_t<std::remove_reference_t<T>>>(&Value)); } template <class T> T any_cast(Any &&Value) { - using U = - typename std::remove_cv<typename std::remove_reference<T>::type>::type; - return static_cast<T>(std::move(*any_cast<U>(&Value))); + return static_cast<T>(std::move( + *any_cast<std::remove_cv_t<std::remove_reference_t<T>>>(&Value))); } template <class T> const T *any_cast(const Any *Value) { - using U = - typename std::remove_cv<typename std::remove_reference<T>::type>::type; + using U = std::remove_cv_t<std::remove_reference_t<T>>; assert(Value && any_isa<T>(*Value) && "Bad any cast!"); if (!Value || !any_isa<U>(*Value)) return nullptr; @@ -147,7 +142,7 @@ template <class T> 
const T *any_cast(const Any *Value) { } template <class T> T *any_cast(Any *Value) { - using U = typename std::decay<T>::type; + using U = std::decay_t<T>; assert(Value && any_isa<U>(*Value) && "Bad any cast!"); if (!Value || !any_isa<U>(*Value)) return nullptr; diff --git a/llvm/include/llvm/ADT/ArrayRef.h b/llvm/include/llvm/ADT/ArrayRef.h index 3d22442918cd..5ed4d0766c34 100644 --- a/llvm/include/llvm/ADT/ArrayRef.h +++ b/llvm/include/llvm/ADT/ArrayRef.h @@ -38,7 +38,7 @@ namespace llvm { /// This is intended to be trivially copyable, so it should be passed by /// value. template<typename T> - class LLVM_NODISCARD ArrayRef { + class LLVM_GSL_POINTER LLVM_NODISCARD ArrayRef { public: using iterator = const T *; using const_iterator = const T *; @@ -114,30 +114,28 @@ namespace llvm { /// Construct an ArrayRef<const T*> from ArrayRef<T*>. This uses SFINAE to /// ensure that only ArrayRefs of pointers can be converted. template <typename U> - ArrayRef( - const ArrayRef<U *> &A, - typename std::enable_if< - std::is_convertible<U *const *, T const *>::value>::type * = nullptr) - : Data(A.data()), Length(A.size()) {} + ArrayRef(const ArrayRef<U *> &A, + std::enable_if_t<std::is_convertible<U *const *, T const *>::value> + * = nullptr) + : Data(A.data()), Length(A.size()) {} /// Construct an ArrayRef<const T*> from a SmallVector<T*>. This is /// templated in order to avoid instantiating SmallVectorTemplateCommon<T> /// whenever we copy-construct an ArrayRef. 
- template<typename U, typename DummyT> + template <typename U, typename DummyT> /*implicit*/ ArrayRef( - const SmallVectorTemplateCommon<U *, DummyT> &Vec, - typename std::enable_if< - std::is_convertible<U *const *, T const *>::value>::type * = nullptr) - : Data(Vec.data()), Length(Vec.size()) { - } + const SmallVectorTemplateCommon<U *, DummyT> &Vec, + std::enable_if_t<std::is_convertible<U *const *, T const *>::value> * = + nullptr) + : Data(Vec.data()), Length(Vec.size()) {} /// Construct an ArrayRef<const T*> from std::vector<T*>. This uses SFINAE /// to ensure that only vectors of pointers can be converted. - template<typename U, typename A> + template <typename U, typename A> ArrayRef(const std::vector<U *, A> &Vec, - typename std::enable_if< - std::is_convertible<U *const *, T const *>::value>::type* = 0) - : Data(Vec.data()), Length(Vec.size()) {} + std::enable_if_t<std::is_convertible<U *const *, T const *>::value> + * = 0) + : Data(Vec.data()), Length(Vec.size()) {} /// @} /// @name Simple Operations @@ -256,7 +254,7 @@ namespace llvm { /// The declaration here is extra complicated so that "arrayRef = {}" /// continues to select the move assignment operator. template <typename U> - typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type & + std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> & operator=(U &&Temporary) = delete; /// Disallow accidental assignment from a temporary. @@ -264,7 +262,7 @@ namespace llvm { /// The declaration here is extra complicated so that "arrayRef = {}" /// continues to select the move assignment operator. template <typename U> - typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type & + std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> & operator=(std::initializer_list<U>) = delete; /// @} @@ -308,17 +306,17 @@ namespace llvm { /// Construct an empty MutableArrayRef from None. 
/*implicit*/ MutableArrayRef(NoneType) : ArrayRef<T>() {} - /// Construct an MutableArrayRef from a single element. + /// Construct a MutableArrayRef from a single element. /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {} - /// Construct an MutableArrayRef from a pointer and length. + /// Construct a MutableArrayRef from a pointer and length. /*implicit*/ MutableArrayRef(T *data, size_t length) : ArrayRef<T>(data, length) {} - /// Construct an MutableArrayRef from a range. + /// Construct a MutableArrayRef from a range. MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {} - /// Construct an MutableArrayRef from a SmallVector. + /// Construct a MutableArrayRef from a SmallVector. /*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec) : ArrayRef<T>(Vec) {} @@ -326,12 +324,12 @@ namespace llvm { /*implicit*/ MutableArrayRef(std::vector<T> &Vec) : ArrayRef<T>(Vec) {} - /// Construct an ArrayRef from a std::array + /// Construct a MutableArrayRef from a std::array template <size_t N> /*implicit*/ constexpr MutableArrayRef(std::array<T, N> &Arr) : ArrayRef<T>(Arr) {} - /// Construct an MutableArrayRef from a C array. + /// Construct a MutableArrayRef from a C array. 
template <size_t N> /*implicit*/ constexpr MutableArrayRef(T (&Arr)[N]) : ArrayRef<T>(Arr) {} @@ -534,11 +532,21 @@ namespace llvm { return LHS.equals(RHS); } - template<typename T> + template <typename T> + inline bool operator==(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) { + return ArrayRef<T>(LHS).equals(RHS); + } + + template <typename T> inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) { return !(LHS == RHS); } + template <typename T> + inline bool operator!=(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) { + return !(LHS == RHS); + } + /// @} template <typename T> hash_code hash_value(ArrayRef<T> S) { diff --git a/llvm/include/llvm/ADT/BitVector.h b/llvm/include/llvm/ADT/BitVector.h index 5284be8c4a02..a8d0f07af94a 100644 --- a/llvm/include/llvm/ADT/BitVector.h +++ b/llvm/include/llvm/ADT/BitVector.h @@ -14,6 +14,7 @@ #define LLVM_ADT_BITVECTOR_H #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/MathExtras.h" #include <algorithm> @@ -531,24 +532,10 @@ public: // Comparison operators. bool operator==(const BitVector &RHS) const { - unsigned ThisWords = NumBitWords(size()); - unsigned RHSWords = NumBitWords(RHS.size()); - unsigned i; - for (i = 0; i != std::min(ThisWords, RHSWords); ++i) - if (Bits[i] != RHS.Bits[i]) - return false; - - // Verify that any extra words are all zeros. - if (i != ThisWords) { - for (; i != ThisWords; ++i) - if (Bits[i]) - return false; - } else if (i != RHSWords) { - for (; i != RHSWords; ++i) - if (RHS.Bits[i]) - return false; - } - return true; + if (size() != RHS.size()) + return false; + unsigned NumWords = NumBitWords(size()); + return Bits.take_front(NumWords) == RHS.Bits.take_front(NumWords); } bool operator!=(const BitVector &RHS) const { @@ -719,6 +706,14 @@ public: if (this == &RHS) return *this; Size = RHS.size(); + + // Handle tombstone when the BitVector is a key of a DenseHash. 
+ if (RHS.isInvalid()) { + std::free(Bits.data()); + Bits = None; + return *this; + } + unsigned RHSWords = NumBitWords(Size); if (Size <= getBitCapacity()) { if (Size) @@ -758,6 +753,16 @@ public: std::swap(Size, RHS.Size); } + void invalid() { + assert(!Size && Bits.empty()); + Size = (unsigned)-1; + } + bool isInvalid() const { return Size == (unsigned)-1; } + + ArrayRef<BitWord> getData() const { + return Bits.take_front(NumBitWords(size())); + } + //===--------------------------------------------------------------------===// // Portable bit mask operations. //===--------------------------------------------------------------------===// @@ -932,6 +937,23 @@ inline size_t capacity_in_bytes(const BitVector &X) { return X.getMemorySize(); } +template <> struct DenseMapInfo<BitVector> { + static inline BitVector getEmptyKey() { return BitVector(); } + static inline BitVector getTombstoneKey() { + BitVector V; + V.invalid(); + return V; + } + static unsigned getHashValue(const BitVector &V) { + return DenseMapInfo<std::pair<unsigned, ArrayRef<uintptr_t>>>::getHashValue( + std::make_pair(V.size(), V.getData())); + } + static bool isEqual(const BitVector &LHS, const BitVector &RHS) { + if (LHS.isInvalid() || RHS.isInvalid()) + return LHS.isInvalid() == RHS.isInvalid(); + return LHS == RHS; + } +}; } // end namespace llvm namespace std { diff --git a/llvm/include/llvm/ADT/Bitfields.h b/llvm/include/llvm/ADT/Bitfields.h new file mode 100644 index 000000000000..d93f6483fa52 --- /dev/null +++ b/llvm/include/llvm/ADT/Bitfields.h @@ -0,0 +1,289 @@ +//===-- llvm/ADT/Bitfield.h - Get and Set bits in an integer ---*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file implements methods to test, set and extract typed bits from packed +/// unsigned integers. +/// +/// Why not C++ bitfields? +/// ---------------------- +/// C++ bitfields do not offer control over the bit layout nor consistent +/// behavior when it comes to out of range values. +/// For instance, the layout is implementation defined and adjacent bits may be +/// packed together but are not required to. This is problematic when storage is +/// sparse and data must be stored in a particular integer type. +/// +/// The methods provided in this file ensure precise control over the +/// layout/storage as well as protection against out of range values. +/// +/// Usage example +/// ------------- +/// \code{.cpp} +/// uint8_t Storage = 0; +/// +/// // Store and retrieve a single bit as bool. +/// using Bool = Bitfield::Element<bool, 0, 1>; +/// Bitfield::set<Bool>(Storage, true); +/// EXPECT_EQ(Storage, 0b00000001); +/// // ^ +/// EXPECT_EQ(Bitfield::get<Bool>(Storage), true); +/// +/// // Store and retrieve a 2 bit typed enum. +/// // Note: enum underlying type must be unsigned. +/// enum class SuitEnum : uint8_t { CLUBS, DIAMONDS, HEARTS, SPADES }; +/// // Note: enum maximum value needs to be passed in as last parameter. +/// using Suit = Bitfield::Element<SuitEnum, 1, 2, SuitEnum::SPADES>; +/// Bitfield::set<Suit>(Storage, SuitEnum::HEARTS); +/// EXPECT_EQ(Storage, 0b00000101); +/// // ^^ +/// EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::HEARTS); +/// +/// // Store and retrieve a 5 bit value as unsigned. +/// using Value = Bitfield::Element<unsigned, 3, 5>; +/// Bitfield::set<Value>(Storage, 10); +/// EXPECT_EQ(Storage, 0b01010101); +/// // ^^^^^ +/// EXPECT_EQ(Bitfield::get<Value>(Storage), 10U); +/// +/// // Interpret the same 5 bit value as signed. 
+/// using SignedValue = Bitfield::Element<int, 3, 5>; +/// Bitfield::set<SignedValue>(Storage, -2); +/// EXPECT_EQ(Storage, 0b11110101); +/// // ^^^^^ +/// EXPECT_EQ(Bitfield::get<SignedValue>(Storage), -2); +/// +/// // Ability to efficiently test if a field is non zero. +/// EXPECT_TRUE(Bitfield::test<Value>(Storage)); +/// +/// // Alter Storage changes value. +/// Storage = 0; +/// EXPECT_EQ(Bitfield::get<Bool>(Storage), false); +/// EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::CLUBS); +/// EXPECT_EQ(Bitfield::get<Value>(Storage), 0U); +/// EXPECT_EQ(Bitfield::get<SignedValue>(Storage), 0); +/// +/// Storage = 255; +/// EXPECT_EQ(Bitfield::get<Bool>(Storage), true); +/// EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::SPADES); +/// EXPECT_EQ(Bitfield::get<Value>(Storage), 31U); +/// EXPECT_EQ(Bitfield::get<SignedValue>(Storage), -1); +/// \endcode +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_BITFIELDS_H +#define LLVM_ADT_BITFIELDS_H + +#include <cassert> +#include <climits> // CHAR_BIT +#include <cstddef> // size_t +#include <cstdint> // uintXX_t +#include <limits> // numeric_limits +#include <type_traits> + +namespace llvm { + +namespace bitfields_details { + +/// A struct defining useful bit patterns for n-bits integer types. +template <typename T, unsigned Bits> struct BitPatterns { + /// Bit patterns are forged using the equivalent `Unsigned` type because of + /// undefined operations over signed types (e.g. Bitwise shift operators). + /// Moreover same size casting from unsigned to signed is well defined but not + /// the other way around. + using Unsigned = typename std::make_unsigned<T>::type; + static_assert(sizeof(Unsigned) == sizeof(T), "Types must have same size"); + + static constexpr unsigned TypeBits = sizeof(Unsigned) * CHAR_BIT; + static_assert(TypeBits >= Bits, "n-bit must fit in T"); + + /// e.g. with TypeBits == 8 and Bits == 6. 
+ static constexpr Unsigned AllZeros = Unsigned(0); // 00000000 + static constexpr Unsigned AllOnes = ~Unsigned(0); // 11111111 + static constexpr Unsigned Umin = AllZeros; // 00000000 + static constexpr Unsigned Umax = AllOnes >> (TypeBits - Bits); // 00111111 + static constexpr Unsigned SignBitMask = Unsigned(1) << (Bits - 1); // 00100000 + static constexpr Unsigned Smax = Umax >> 1U; // 00011111 + static constexpr Unsigned Smin = ~Smax; // 11100000 + static constexpr Unsigned SignExtend = Unsigned(Smin << 1U); // 11000000 +}; + +/// `Compressor` is used to manipulate the bits of a (possibly signed) integer +/// type so it can be packed and unpacked into a `bits` sized integer, +/// `Compressor` is specialized on signed-ness so no runtime cost is incurred. +/// The `pack` method also checks that the passed in `UserValue` is valid. +template <typename T, unsigned Bits, bool = std::is_unsigned<T>::value> +struct Compressor { + static_assert(std::is_unsigned<T>::value, "T is unsigned"); + using BP = BitPatterns<T, Bits>; + + static T pack(T UserValue, T UserMaxValue) { + assert(UserValue <= UserMaxValue && "value is too big"); + assert(UserValue <= BP::Umax && "value is too big"); + return UserValue; + } + + static T unpack(T StorageValue) { return StorageValue; } +}; + +template <typename T, unsigned Bits> struct Compressor<T, Bits, false> { + static_assert(std::is_signed<T>::value, "T is signed"); + using BP = BitPatterns<T, Bits>; + + static T pack(T UserValue, T UserMaxValue) { + assert(UserValue <= UserMaxValue && "value is too big"); + assert(UserValue <= T(BP::Smax) && "value is too big"); + assert(UserValue >= T(BP::Smin) && "value is too small"); + if (UserValue < 0) + UserValue &= ~BP::SignExtend; + return UserValue; + } + + static T unpack(T StorageValue) { + if (StorageValue >= T(BP::SignBitMask)) + StorageValue |= BP::SignExtend; + return StorageValue; + } +}; + +/// Impl is where Bifield description and Storage are put together to interact +/// with 
values. +template <typename Bitfield, typename StorageType> struct Impl { + static_assert(std::is_unsigned<StorageType>::value, + "Storage must be unsigned"); + using IntegerType = typename Bitfield::IntegerType; + using C = Compressor<IntegerType, Bitfield::Bits>; + using BP = BitPatterns<StorageType, Bitfield::Bits>; + + static constexpr size_t StorageBits = sizeof(StorageType) * CHAR_BIT; + static_assert(Bitfield::FirstBit <= StorageBits, "Data must fit in mask"); + static_assert(Bitfield::LastBit <= StorageBits, "Data must fit in mask"); + static constexpr StorageType Mask = BP::Umax << Bitfield::Shift; + + /// Checks `UserValue` is within bounds and packs it between `FirstBit` and + /// `LastBit` of `Packed` leaving the rest unchanged. + static void update(StorageType &Packed, IntegerType UserValue) { + const StorageType StorageValue = C::pack(UserValue, Bitfield::UserMaxValue); + Packed &= ~Mask; + Packed |= StorageValue << Bitfield::Shift; + } + + /// Interprets bits between `FirstBit` and `LastBit` of `Packed` as + /// an`IntegerType`. + static IntegerType extract(StorageType Packed) { + const StorageType StorageValue = (Packed & Mask) >> Bitfield::Shift; + return C::unpack(StorageValue); + } + + /// Interprets bits between `FirstBit` and `LastBit` of `Packed` as + /// an`IntegerType`. + static StorageType test(StorageType Packed) { return Packed & Mask; } +}; + +/// `Bitfield` deals with the following type: +/// - unsigned enums +/// - signed and unsigned integer +/// - `bool` +/// Internally though we only manipulate integer with well defined and +/// consistent semantics, this excludes typed enums and `bool` that are replaced +/// with their unsigned counterparts. The correct type is restored in the public +/// API. 
+template <typename T, bool = std::is_enum<T>::value> +struct ResolveUnderlyingType { + using type = typename std::underlying_type<T>::type; +}; +template <typename T> struct ResolveUnderlyingType<T, false> { + using type = T; +}; +template <> struct ResolveUnderlyingType<bool, false> { + /// In case sizeof(bool) != 1, replace `void` by an additionnal + /// std::conditional. + using type = std::conditional<sizeof(bool) == 1, uint8_t, void>::type; +}; + +} // namespace bitfields_details + +/// Holds functions to get, set or test bitfields. +struct Bitfield { + /// Describes an element of a Bitfield. This type is then used with the + /// Bitfield static member functions. + /// \tparam T The type of the field once in unpacked form. + /// \tparam Offset The position of the first bit. + /// \tparam Size The size of the field. + /// \tparam MaxValue For enums the maximum enum allowed. + template <typename T, unsigned Offset, unsigned Size, + T MaxValue = std::is_enum<T>::value + ? T(0) // coupled with static_assert below + : std::numeric_limits<T>::max()> + struct Element { + using Type = T; + using IntegerType = + typename bitfields_details::ResolveUnderlyingType<T>::type; + static constexpr unsigned Shift = Offset; + static constexpr unsigned Bits = Size; + static constexpr unsigned FirstBit = Offset; + static constexpr unsigned LastBit = Shift + Bits - 1; + static constexpr unsigned NextBit = Shift + Bits; + + private: + template <typename, typename> friend struct bitfields_details::Impl; + + static_assert(Bits > 0, "Bits must be non zero"); + static constexpr size_t TypeBits = sizeof(IntegerType) * CHAR_BIT; + static_assert(Bits <= TypeBits, "Bits may not be greater than T size"); + static_assert(!std::is_enum<T>::value || MaxValue != T(0), + "Enum Bitfields must provide a MaxValue"); + static_assert(!std::is_enum<T>::value || + std::is_unsigned<IntegerType>::value, + "Enum must be unsigned"); + static_assert(std::is_integral<IntegerType>::value && + 
std::numeric_limits<IntegerType>::is_integer, + "IntegerType must be an integer type"); + + static constexpr IntegerType UserMaxValue = + static_cast<IntegerType>(MaxValue); + }; + + /// Unpacks the field from the `Packed` value. + template <typename Bitfield, typename StorageType> + static typename Bitfield::Type get(StorageType Packed) { + using I = bitfields_details::Impl<Bitfield, StorageType>; + return static_cast<typename Bitfield::Type>(I::extract(Packed)); + } + + /// Return a non-zero value if the field is non-zero. + /// It is more efficient than `getField`. + template <typename Bitfield, typename StorageType> + static StorageType test(StorageType Packed) { + using I = bitfields_details::Impl<Bitfield, StorageType>; + return I::test(Packed); + } + + /// Sets the typed value in the provided `Packed` value. + /// The method will asserts if the provided value is too big to fit in. + template <typename Bitfield, typename StorageType> + static void set(StorageType &Packed, typename Bitfield::Type Value) { + using I = bitfields_details::Impl<Bitfield, StorageType>; + I::update(Packed, static_cast<typename Bitfield::IntegerType>(Value)); + } + + /// Returns whether the two bitfields share common bits. + template <typename A, typename B> static constexpr bool isOverlapping() { + return A::LastBit >= B::FirstBit && B::LastBit >= A::FirstBit; + } + + template <typename A> static constexpr bool areContiguous() { return true; } + template <typename A, typename B, typename... 
Others> + static constexpr bool areContiguous() { + return A::NextBit == B::FirstBit && areContiguous<B, Others...>(); + } +}; + +} // namespace llvm + +#endif // LLVM_ADT_BITFIELDS_H diff --git a/llvm/include/llvm/ADT/BitmaskEnum.h b/llvm/include/llvm/ADT/BitmaskEnum.h index 1a18bc721b21..89e5508e08e1 100644 --- a/llvm/include/llvm/ADT/BitmaskEnum.h +++ b/llvm/include/llvm/ADT/BitmaskEnum.h @@ -71,49 +71,49 @@ struct is_bitmask_enum : std::false_type {}; template <typename E> struct is_bitmask_enum< - E, typename std::enable_if<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >= - 0>::type> : std::true_type {}; + E, std::enable_if_t<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >= 0>> + : std::true_type {}; namespace BitmaskEnumDetail { /// Get a bitmask with 1s in all places up to the high-order bit of E's largest /// value. -template <typename E> typename std::underlying_type<E>::type Mask() { +template <typename E> std::underlying_type_t<E> Mask() { // On overflow, NextPowerOf2 returns zero with the type uint64_t, so // subtracting 1 gives us the mask with all bits set, like we want. - return NextPowerOf2(static_cast<typename std::underlying_type<E>::type>( + return NextPowerOf2(static_cast<std::underlying_type_t<E>>( E::LLVM_BITMASK_LARGEST_ENUMERATOR)) - 1; } /// Check that Val is in range for E, and return Val cast to E's underlying /// type. -template <typename E> typename std::underlying_type<E>::type Underlying(E Val) { - auto U = static_cast<typename std::underlying_type<E>::type>(Val); +template <typename E> std::underlying_type_t<E> Underlying(E Val) { + auto U = static_cast<std::underlying_type_t<E>>(Val); assert(U >= 0 && "Negative enum values are not allowed."); assert(U <= Mask<E>() && "Enum value too large (or largest val too small?)"); return U; } -template <typename E, - typename = typename std::enable_if<is_bitmask_enum<E>::value>::type> +constexpr unsigned bitWidth(uint64_t Value) { + return Value ? 
1 + bitWidth(Value >> 1) : 0; +} + +template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>> E operator~(E Val) { return static_cast<E>(~Underlying(Val) & Mask<E>()); } -template <typename E, - typename = typename std::enable_if<is_bitmask_enum<E>::value>::type> +template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>> E operator|(E LHS, E RHS) { return static_cast<E>(Underlying(LHS) | Underlying(RHS)); } -template <typename E, - typename = typename std::enable_if<is_bitmask_enum<E>::value>::type> +template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>> E operator&(E LHS, E RHS) { return static_cast<E>(Underlying(LHS) & Underlying(RHS)); } -template <typename E, - typename = typename std::enable_if<is_bitmask_enum<E>::value>::type> +template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>> E operator^(E LHS, E RHS) { return static_cast<E>(Underlying(LHS) ^ Underlying(RHS)); } @@ -121,22 +121,19 @@ E operator^(E LHS, E RHS) { // |=, &=, and ^= return a reference to LHS, to match the behavior of the // operators on builtin types. -template <typename E, - typename = typename std::enable_if<is_bitmask_enum<E>::value>::type> +template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>> E &operator|=(E &LHS, E RHS) { LHS = LHS | RHS; return LHS; } -template <typename E, - typename = typename std::enable_if<is_bitmask_enum<E>::value>::type> +template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>> E &operator&=(E &LHS, E RHS) { LHS = LHS & RHS; return LHS; } -template <typename E, - typename = typename std::enable_if<is_bitmask_enum<E>::value>::type> +template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>> E &operator^=(E &LHS, E RHS) { LHS = LHS ^ RHS; return LHS; @@ -146,6 +143,10 @@ E &operator^=(E &LHS, E RHS) { // Enable bitmask enums in namespace ::llvm and all nested namespaces. 
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE(); +template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>> +constexpr unsigned BitWidth = BitmaskEnumDetail::bitWidth(uint64_t{ + static_cast<std::underlying_type_t<E>>( + E::LLVM_BITMASK_LARGEST_ENUMERATOR)}); } // namespace llvm diff --git a/llvm/include/llvm/ADT/CachedHashString.h b/llvm/include/llvm/ADT/CachedHashString.h index 80144fb87e0e..6233d0fc08fd 100644 --- a/llvm/include/llvm/ADT/CachedHashString.h +++ b/llvm/include/llvm/ADT/CachedHashString.h @@ -19,9 +19,8 @@ #ifndef LLVM_ADT_CACHED_HASH_STRING_H #define LLVM_ADT_CACHED_HASH_STRING_H -#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/StringRef.h" -#include "llvm/Support/raw_ostream.h" namespace llvm { diff --git a/llvm/include/llvm/ADT/CoalescingBitVector.h b/llvm/include/llvm/ADT/CoalescingBitVector.h new file mode 100644 index 000000000000..f8c8fec0ec9e --- /dev/null +++ b/llvm/include/llvm/ADT/CoalescingBitVector.h @@ -0,0 +1,444 @@ +//===- llvm/ADT/CoalescingBitVector.h - A coalescing bitvector --*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file A bitvector that uses an IntervalMap to coalesce adjacent elements +/// into intervals. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_COALESCINGBITVECTOR_H +#define LLVM_ADT_COALESCINGBITVECTOR_H + +#include "llvm/ADT/IntervalMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/iterator_range.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +#include <algorithm> +#include <initializer_list> + +namespace llvm { + +/// A bitvector that, under the hood, relies on an IntervalMap to coalesce +/// elements into intervals. Good for representing sets which predominantly +/// contain contiguous ranges. Bad for representing sets with lots of gaps +/// between elements. +/// +/// Compared to SparseBitVector, CoalescingBitVector offers more predictable +/// performance for non-sequential find() operations. +/// +/// \tparam IndexT - The type of the index into the bitvector. +/// \tparam N - The first N coalesced intervals of set bits are stored in-place. +template <typename IndexT, unsigned N = 16> class CoalescingBitVector { + static_assert(std::is_unsigned<IndexT>::value, + "Index must be an unsigned integer."); + + using ThisT = CoalescingBitVector<IndexT, N>; + + /// An interval map for closed integer ranges. The mapped values are unused. + using MapT = IntervalMap<IndexT, char, N>; + + using UnderlyingIterator = typename MapT::const_iterator; + + using IntervalT = std::pair<IndexT, IndexT>; + +public: + using Allocator = typename MapT::Allocator; + + /// Construct by passing in a CoalescingBitVector<IndexT>::Allocator + /// reference. + CoalescingBitVector(Allocator &Alloc) + : Alloc(&Alloc), Intervals(Alloc) {} + + /// \name Copy/move constructors and assignment operators. 
+ /// @{ + + CoalescingBitVector(const ThisT &Other) + : Alloc(Other.Alloc), Intervals(*Other.Alloc) { + set(Other); + } + + ThisT &operator=(const ThisT &Other) { + clear(); + set(Other); + return *this; + } + + CoalescingBitVector(ThisT &&Other) = delete; + ThisT &operator=(ThisT &&Other) = delete; + + /// @} + + /// Clear all the bits. + void clear() { Intervals.clear(); } + + /// Check whether no bits are set. + bool empty() const { return Intervals.empty(); } + + /// Count the number of set bits. + unsigned count() const { + unsigned Bits = 0; + for (auto It = Intervals.begin(), End = Intervals.end(); It != End; ++It) + Bits += 1 + It.stop() - It.start(); + return Bits; + } + + /// Set the bit at \p Index. + /// + /// This method does /not/ support setting a bit that has already been set, + /// for efficiency reasons. If possible, restructure your code to not set the + /// same bit multiple times, or use \ref test_and_set. + void set(IndexT Index) { + assert(!test(Index) && "Setting already-set bits not supported/efficient, " + "IntervalMap will assert"); + insert(Index, Index); + } + + /// Set the bits set in \p Other. + /// + /// This method does /not/ support setting already-set bits, see \ref set + /// for the rationale. For a safe set union operation, use \ref operator|=. + void set(const ThisT &Other) { + for (auto It = Other.Intervals.begin(), End = Other.Intervals.end(); + It != End; ++It) + insert(It.start(), It.stop()); + } + + /// Set the bits at \p Indices. Used for testing, primarily. + void set(std::initializer_list<IndexT> Indices) { + for (IndexT Index : Indices) + set(Index); + } + + /// Check whether the bit at \p Index is set. + bool test(IndexT Index) const { + const auto It = Intervals.find(Index); + if (It == Intervals.end()) + return false; + assert(It.stop() >= Index && "Interval must end after Index"); + return It.start() <= Index; + } + + /// Set the bit at \p Index. Supports setting an already-set bit. 
+ void test_and_set(IndexT Index) { + if (!test(Index)) + set(Index); + } + + /// Reset the bit at \p Index. Supports resetting an already-unset bit. + void reset(IndexT Index) { + auto It = Intervals.find(Index); + if (It == Intervals.end()) + return; + + // Split the interval containing Index into up to two parts: one from + // [Start, Index-1] and another from [Index+1, Stop]. If Index is equal to + // either Start or Stop, we create one new interval. If Index is equal to + // both Start and Stop, we simply erase the existing interval. + IndexT Start = It.start(); + if (Index < Start) + // The index was not set. + return; + IndexT Stop = It.stop(); + assert(Index <= Stop && "Wrong interval for index"); + It.erase(); + if (Start < Index) + insert(Start, Index - 1); + if (Index < Stop) + insert(Index + 1, Stop); + } + + /// Set union. If \p RHS is guaranteed to not overlap with this, \ref set may + /// be a faster alternative. + void operator|=(const ThisT &RHS) { + // Get the overlaps between the two interval maps. + SmallVector<IntervalT, 8> Overlaps; + getOverlaps(RHS, Overlaps); + + // Insert the non-overlapping parts of all the intervals from RHS. + for (auto It = RHS.Intervals.begin(), End = RHS.Intervals.end(); + It != End; ++It) { + IndexT Start = It.start(); + IndexT Stop = It.stop(); + SmallVector<IntervalT, 8> NonOverlappingParts; + getNonOverlappingParts(Start, Stop, Overlaps, NonOverlappingParts); + for (IntervalT AdditivePortion : NonOverlappingParts) + insert(AdditivePortion.first, AdditivePortion.second); + } + } + + /// Set intersection. + void operator&=(const ThisT &RHS) { + // Get the overlaps between the two interval maps (i.e. the intersection). + SmallVector<IntervalT, 8> Overlaps; + getOverlaps(RHS, Overlaps); + // Rebuild the interval map, including only the overlaps. + clear(); + for (IntervalT Overlap : Overlaps) + insert(Overlap.first, Overlap.second); + } + + /// Reset all bits present in \p Other. 
+ void intersectWithComplement(const ThisT &Other) { + SmallVector<IntervalT, 8> Overlaps; + if (!getOverlaps(Other, Overlaps)) { + // If there is no overlap with Other, the intersection is empty. + return; + } + + // Delete the overlapping intervals. Split up intervals that only partially + // intersect an overlap. + for (IntervalT Overlap : Overlaps) { + IndexT OlapStart, OlapStop; + std::tie(OlapStart, OlapStop) = Overlap; + + auto It = Intervals.find(OlapStart); + IndexT CurrStart = It.start(); + IndexT CurrStop = It.stop(); + assert(CurrStart <= OlapStart && OlapStop <= CurrStop && + "Expected some intersection!"); + + // Split the overlap interval into up to two parts: one from [CurrStart, + // OlapStart-1] and another from [OlapStop+1, CurrStop]. If OlapStart is + // equal to CurrStart, the first split interval is unnecessary. Ditto for + // when OlapStop is equal to CurrStop, we omit the second split interval. + It.erase(); + if (CurrStart < OlapStart) + insert(CurrStart, OlapStart - 1); + if (OlapStop < CurrStop) + insert(OlapStop + 1, CurrStop); + } + } + + bool operator==(const ThisT &RHS) const { + // We cannot just use std::equal because it checks the dereferenced values + // of an iterator pair for equality, not the iterators themselves. In our + // case that results in comparison of the (unused) IntervalMap values. + auto ItL = Intervals.begin(); + auto ItR = RHS.Intervals.begin(); + while (ItL != Intervals.end() && ItR != RHS.Intervals.end() && + ItL.start() == ItR.start() && ItL.stop() == ItR.stop()) { + ++ItL; + ++ItR; + } + return ItL == Intervals.end() && ItR == RHS.Intervals.end(); + } + + bool operator!=(const ThisT &RHS) const { return !operator==(RHS); } + + class const_iterator + : public std::iterator<std::forward_iterator_tag, IndexT> { + friend class CoalescingBitVector; + + // For performance reasons, make the offset at the end different than the + // one used in \ref begin, to optimize the common `It == end()` pattern. 
+ static constexpr unsigned kIteratorAtTheEndOffset = ~0u; + + UnderlyingIterator MapIterator; + unsigned OffsetIntoMapIterator = 0; + + // Querying the start/stop of an IntervalMap iterator can be very expensive. + // Cache these values for performance reasons. + IndexT CachedStart = IndexT(); + IndexT CachedStop = IndexT(); + + void setToEnd() { + OffsetIntoMapIterator = kIteratorAtTheEndOffset; + CachedStart = IndexT(); + CachedStop = IndexT(); + } + + /// MapIterator has just changed, reset the cached state to point to the + /// start of the new underlying iterator. + void resetCache() { + if (MapIterator.valid()) { + OffsetIntoMapIterator = 0; + CachedStart = MapIterator.start(); + CachedStop = MapIterator.stop(); + } else { + setToEnd(); + } + } + + /// Advance the iterator to \p Index, if it is contained within the current + /// interval. The public-facing method which supports advancing past the + /// current interval is \ref advanceToLowerBound. + void advanceTo(IndexT Index) { + assert(Index <= CachedStop && "Cannot advance to OOB index"); + if (Index < CachedStart) + // We're already past this index. + return; + OffsetIntoMapIterator = Index - CachedStart; + } + + const_iterator(UnderlyingIterator MapIt) : MapIterator(MapIt) { + resetCache(); + } + + public: + const_iterator() { setToEnd(); } + + bool operator==(const const_iterator &RHS) const { + // Do /not/ compare MapIterator for equality, as this is very expensive. + // The cached start/stop values make that check unnecessary. + return std::tie(OffsetIntoMapIterator, CachedStart, CachedStop) == + std::tie(RHS.OffsetIntoMapIterator, RHS.CachedStart, + RHS.CachedStop); + } + + bool operator!=(const const_iterator &RHS) const { + return !operator==(RHS); + } + + IndexT operator*() const { return CachedStart + OffsetIntoMapIterator; } + + const_iterator &operator++() { // Pre-increment (++It). + if (CachedStart + OffsetIntoMapIterator < CachedStop) { + // Keep going within the current interval. 
+ ++OffsetIntoMapIterator; + } else { + // We reached the end of the current interval: advance. + ++MapIterator; + resetCache(); + } + return *this; + } + + const_iterator operator++(int) { // Post-increment (It++). + const_iterator tmp = *this; + operator++(); + return tmp; + } + + /// Advance the iterator to the first set bit AT, OR AFTER, \p Index. If + /// no such set bit exists, advance to end(). This is like std::lower_bound. + /// This is useful if \p Index is close to the current iterator position. + /// However, unlike \ref find(), this has worst-case O(n) performance. + void advanceToLowerBound(IndexT Index) { + if (OffsetIntoMapIterator == kIteratorAtTheEndOffset) + return; + + // Advance to the first interval containing (or past) Index, or to end(). + while (Index > CachedStop) { + ++MapIterator; + resetCache(); + if (OffsetIntoMapIterator == kIteratorAtTheEndOffset) + return; + } + + advanceTo(Index); + } + }; + + const_iterator begin() const { return const_iterator(Intervals.begin()); } + + const_iterator end() const { return const_iterator(); } + + /// Return an iterator pointing to the first set bit AT, OR AFTER, \p Index. + /// If no such set bit exists, return end(). This is like std::lower_bound. + /// This has worst-case logarithmic performance (roughly O(log(gaps between + /// contiguous ranges))). + const_iterator find(IndexT Index) const { + auto UnderlyingIt = Intervals.find(Index); + if (UnderlyingIt == Intervals.end()) + return end(); + auto It = const_iterator(UnderlyingIt); + It.advanceTo(Index); + return It; + } + + /// Return a range iterator which iterates over all of the set bits in the + /// half-open range [Start, End). 
+ iterator_range<const_iterator> half_open_range(IndexT Start, + IndexT End) const { + assert(Start < End && "Not a valid range"); + auto StartIt = find(Start); + if (StartIt == end() || *StartIt >= End) + return {end(), end()}; + auto EndIt = StartIt; + EndIt.advanceToLowerBound(End); + return {StartIt, EndIt}; + } + + void print(raw_ostream &OS) const { + OS << "{"; + for (auto It = Intervals.begin(), End = Intervals.end(); It != End; + ++It) { + OS << "[" << It.start(); + if (It.start() != It.stop()) + OS << ", " << It.stop(); + OS << "]"; + } + OS << "}"; + } + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + LLVM_DUMP_METHOD void dump() const { + // LLDB swallows the first line of output after callling dump(). Add + // newlines before/after the braces to work around this. + dbgs() << "\n"; + print(dbgs()); + dbgs() << "\n"; + } +#endif + +private: + void insert(IndexT Start, IndexT End) { Intervals.insert(Start, End, 0); } + + /// Record the overlaps between \p this and \p Other in \p Overlaps. Return + /// true if there is any overlap. + bool getOverlaps(const ThisT &Other, + SmallVectorImpl<IntervalT> &Overlaps) const { + for (IntervalMapOverlaps<MapT, MapT> I(Intervals, Other.Intervals); + I.valid(); ++I) + Overlaps.emplace_back(I.start(), I.stop()); + assert(llvm::is_sorted(Overlaps, + [](IntervalT LHS, IntervalT RHS) { + return LHS.second < RHS.first; + }) && + "Overlaps must be sorted"); + return !Overlaps.empty(); + } + + /// Given the set of overlaps between this and some other bitvector, and an + /// interval [Start, Stop] from that bitvector, determine the portions of the + /// interval which do not overlap with this. 
+ void getNonOverlappingParts(IndexT Start, IndexT Stop, + const SmallVectorImpl<IntervalT> &Overlaps, + SmallVectorImpl<IntervalT> &NonOverlappingParts) { + IndexT NextUncoveredBit = Start; + for (IntervalT Overlap : Overlaps) { + IndexT OlapStart, OlapStop; + std::tie(OlapStart, OlapStop) = Overlap; + + // [Start;Stop] and [OlapStart;OlapStop] overlap iff OlapStart <= Stop + // and Start <= OlapStop. + bool DoesOverlap = OlapStart <= Stop && Start <= OlapStop; + if (!DoesOverlap) + continue; + + // Cover the range [NextUncoveredBit, OlapStart). This puts the start of + // the next uncovered range at OlapStop+1. + if (NextUncoveredBit < OlapStart) + NonOverlappingParts.emplace_back(NextUncoveredBit, OlapStart - 1); + NextUncoveredBit = OlapStop + 1; + if (NextUncoveredBit > Stop) + break; + } + if (NextUncoveredBit <= Stop) + NonOverlappingParts.emplace_back(NextUncoveredBit, Stop); + } + + Allocator *Alloc; + MapT Intervals; +}; + +} // namespace llvm + +#endif // LLVM_ADT_COALESCINGBITVECTOR_H diff --git a/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h b/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h index d4cdc3c86048..c3872af2a0b4 100644 --- a/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h +++ b/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h @@ -29,7 +29,7 @@ namespace llvm { /// /// P(S) => P(S union pred(S)) /// -/// The minization algorithm uses this dependency information to attempt to +/// The minimization algorithm uses this dependency information to attempt to /// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG /// is not required to satisfy this property, but the algorithm will run /// substantially fewer tests with appropriate dependencies. 
\see DeltaAlgorithm diff --git a/llvm/include/llvm/ADT/DeltaAlgorithm.h b/llvm/include/llvm/ADT/DeltaAlgorithm.h index 114b95499530..e1743fd00196 100644 --- a/llvm/include/llvm/ADT/DeltaAlgorithm.h +++ b/llvm/include/llvm/ADT/DeltaAlgorithm.h @@ -54,7 +54,7 @@ private: /// Split - Partition a set of changes \p S into one or two subsets. void Split(const changeset_ty &S, changesetlist_ty &Res); - /// Delta - Minimize a set of \p Changes which has been partioned into + /// Delta - Minimize a set of \p Changes which has been partitioned into /// smaller sets, by attempting to remove individual subsets. changeset_ty Delta(const changeset_ty &Changes, const changesetlist_ty &Sets); diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h index 148d319c8603..34d397cc9793 100644 --- a/llvm/include/llvm/ADT/DenseMap.h +++ b/llvm/include/llvm/ADT/DenseMap.h @@ -18,6 +18,7 @@ #include "llvm/Support/AlignOf.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/MemAlloc.h" #include "llvm/Support/ReverseIteration.h" #include "llvm/Support/type_traits.h" #include <algorithm> @@ -119,9 +120,8 @@ public: } const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); - if (is_trivially_copyable<KeyT>::value && - is_trivially_copyable<ValueT>::value) { - // Use a simpler loop when these are trivial types. + if (std::is_trivially_destructible<ValueT>::value) { + // Use a simpler loop when values don't need destruction. for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) P->getFirst() = EmptyKey; } else { @@ -150,13 +150,19 @@ public: iterator find(const_arg_type_t<KeyT> Val) { BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return makeIterator(TheBucket, getBucketsEnd(), *this, true); + return makeIterator(TheBucket, + shouldReverseIterate<KeyT>() ? 
getBuckets() + : getBucketsEnd(), + *this, true); return end(); } const_iterator find(const_arg_type_t<KeyT> Val) const { const BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return makeConstIterator(TheBucket, getBucketsEnd(), *this, true); + return makeConstIterator(TheBucket, + shouldReverseIterate<KeyT>() ? getBuckets() + : getBucketsEnd(), + *this, true); return end(); } @@ -169,14 +175,20 @@ public: iterator find_as(const LookupKeyT &Val) { BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return makeIterator(TheBucket, getBucketsEnd(), *this, true); + return makeIterator(TheBucket, + shouldReverseIterate<KeyT>() ? getBuckets() + : getBucketsEnd(), + *this, true); return end(); } template<class LookupKeyT> const_iterator find_as(const LookupKeyT &Val) const { const BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return makeConstIterator(TheBucket, getBucketsEnd(), *this, true); + return makeConstIterator(TheBucket, + shouldReverseIterate<KeyT>() ? getBuckets() + : getBucketsEnd(), + *this, true); return end(); } @@ -210,16 +222,22 @@ public: std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) { BucketT *TheBucket; if (LookupBucketFor(Key, TheBucket)) - return std::make_pair( - makeIterator(TheBucket, getBucketsEnd(), *this, true), - false); // Already in map. + return std::make_pair(makeIterator(TheBucket, + shouldReverseIterate<KeyT>() + ? getBuckets() + : getBucketsEnd(), + *this, true), + false); // Already in map. // Otherwise, insert the new element. TheBucket = InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...); - return std::make_pair( - makeIterator(TheBucket, getBucketsEnd(), *this, true), - true); + return std::make_pair(makeIterator(TheBucket, + shouldReverseIterate<KeyT>() + ? getBuckets() + : getBucketsEnd(), + *this, true), + true); } // Inserts key,value pair into the map if the key isn't already in the map. 
@@ -229,15 +247,21 @@ public: std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) { BucketT *TheBucket; if (LookupBucketFor(Key, TheBucket)) - return std::make_pair( - makeIterator(TheBucket, getBucketsEnd(), *this, true), - false); // Already in map. + return std::make_pair(makeIterator(TheBucket, + shouldReverseIterate<KeyT>() + ? getBuckets() + : getBucketsEnd(), + *this, true), + false); // Already in map. // Otherwise, insert the new element. TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...); - return std::make_pair( - makeIterator(TheBucket, getBucketsEnd(), *this, true), - true); + return std::make_pair(makeIterator(TheBucket, + shouldReverseIterate<KeyT>() + ? getBuckets() + : getBucketsEnd(), + *this, true), + true); } /// Alternate version of insert() which allows a different, and possibly @@ -250,16 +274,22 @@ public: const LookupKeyT &Val) { BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return std::make_pair( - makeIterator(TheBucket, getBucketsEnd(), *this, true), - false); // Already in map. + return std::make_pair(makeIterator(TheBucket, + shouldReverseIterate<KeyT>() + ? getBuckets() + : getBucketsEnd(), + *this, true), + false); // Already in map. // Otherwise, insert the new element. TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first), std::move(KV.second), Val); - return std::make_pair( - makeIterator(TheBucket, getBucketsEnd(), *this, true), - true); + return std::make_pair(makeIterator(TheBucket, + shouldReverseIterate<KeyT>() + ? getBuckets() + : getBucketsEnd(), + *this, true), + true); } /// insert - Range insertion of pairs. 
@@ -695,7 +725,7 @@ class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>, unsigned NumBuckets; public: - /// Create a DenseMap wth an optional \p InitialReserve that guarantee that + /// Create a DenseMap with an optional \p InitialReserve that guarantee that /// this number of elements can be inserted in the map without grow() explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); } @@ -1194,19 +1224,21 @@ public: // for const iterator destinations so it doesn't end up as a user defined copy // constructor. template <bool IsConstSrc, - typename = typename std::enable_if<!IsConstSrc && IsConst>::type> + typename = std::enable_if_t<!IsConstSrc && IsConst>> DenseMapIterator( const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I) : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {} reference operator*() const { assert(isHandleInSync() && "invalid iterator access!"); + assert(Ptr != End && "dereferencing end() iterator"); if (shouldReverseIterate<KeyT>()) return Ptr[-1]; return *Ptr; } pointer operator->() const { assert(isHandleInSync() && "invalid iterator access!"); + assert(Ptr != End && "dereferencing end() iterator"); if (shouldReverseIterate<KeyT>()) return &(Ptr[-1]); return Ptr; @@ -1229,6 +1261,7 @@ public: inline DenseMapIterator& operator++() { // Preincrement assert(isHandleInSync() && "invalid iterator access!"); + assert(Ptr != End && "incrementing end() iterator"); if (shouldReverseIterate<KeyT>()) { --Ptr; RetreatPastEmptyBuckets(); diff --git a/llvm/include/llvm/ADT/DenseMapInfo.h b/llvm/include/llvm/ADT/DenseMapInfo.h index bd4c60c8f13e..e465331ac6f7 100644 --- a/llvm/include/llvm/ADT/DenseMapInfo.h +++ b/llvm/include/llvm/ADT/DenseMapInfo.h @@ -16,8 +16,6 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/StringRef.h" -#include "llvm/Support/PointerLikeTypeTraits.h" -#include "llvm/Support/TypeSize.h" #include <cassert> #include <cstddef> #include 
<cstdint> @@ -25,6 +23,24 @@ namespace llvm { +namespace detail { + +/// Simplistic combination of 32-bit hash values into 32-bit hash values. +static inline unsigned combineHashValue(unsigned a, unsigned b) { + uint64_t key = (uint64_t)a << 32 | (uint64_t)b; + key += ~(key << 32); + key ^= (key >> 22); + key += ~(key << 13); + key ^= (key >> 8); + key += (key << 3); + key ^= (key >> 15); + key += ~(key << 27); + key ^= (key >> 31); + return (unsigned)key; +} + +} // end namespace detail + template<typename T> struct DenseMapInfo { //static inline T getEmptyKey(); @@ -33,18 +49,28 @@ struct DenseMapInfo { //static bool isEqual(const T &LHS, const T &RHS); }; -// Provide DenseMapInfo for all pointers. +// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values +// that are aligned to alignof(T) bytes, but try to avoid requiring T to be +// complete. This allows clients to instantiate DenseMap<T*, ...> with forward +// declared key types. Assume that no pointer key type requires more than 4096 +// bytes of alignment. 
template<typename T> struct DenseMapInfo<T*> { + // The following should hold, but it would require T to be complete: + // static_assert(alignof(T) <= (1 << Log2MaxAlign), + // "DenseMap does not support pointer keys requiring more than " + // "Log2MaxAlign bits of alignment"); + static constexpr uintptr_t Log2MaxAlign = 12; + static inline T* getEmptyKey() { uintptr_t Val = static_cast<uintptr_t>(-1); - Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable; + Val <<= Log2MaxAlign; return reinterpret_cast<T*>(Val); } static inline T* getTombstoneKey() { uintptr_t Val = static_cast<uintptr_t>(-2); - Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable; + Val <<= Log2MaxAlign; return reinterpret_cast<T*>(Val); } @@ -198,17 +224,8 @@ struct DenseMapInfo<std::pair<T, U>> { } static unsigned getHashValue(const Pair& PairVal) { - uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32 - | (uint64_t)SecondInfo::getHashValue(PairVal.second); - key += ~(key << 32); - key ^= (key >> 22); - key += ~(key << 13); - key ^= (key >> 8); - key += (key << 3); - key ^= (key >> 15); - key += ~(key << 27); - key ^= (key >> 31); - return (unsigned)key; + return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first), + SecondInfo::getHashValue(PairVal.second)); } static bool isEqual(const Pair &LHS, const Pair &RHS) { @@ -217,6 +234,56 @@ struct DenseMapInfo<std::pair<T, U>> { } }; +// Provide DenseMapInfo for all tuples whose members have info. +template <typename... 
Ts> struct DenseMapInfo<std::tuple<Ts...>> { + using Tuple = std::tuple<Ts...>; + + static inline Tuple getEmptyKey() { + return Tuple(DenseMapInfo<Ts>::getEmptyKey()...); + } + + static inline Tuple getTombstoneKey() { + return Tuple(DenseMapInfo<Ts>::getTombstoneKey()...); + } + + template <unsigned I> + static unsigned getHashValueImpl(const Tuple &values, std::false_type) { + using EltType = typename std::tuple_element<I, Tuple>::type; + std::integral_constant<bool, I + 1 == sizeof...(Ts)> atEnd; + return detail::combineHashValue( + DenseMapInfo<EltType>::getHashValue(std::get<I>(values)), + getHashValueImpl<I + 1>(values, atEnd)); + } + + template <unsigned I> + static unsigned getHashValueImpl(const Tuple &values, std::true_type) { + return 0; + } + + static unsigned getHashValue(const std::tuple<Ts...> &values) { + std::integral_constant<bool, 0 == sizeof...(Ts)> atEnd; + return getHashValueImpl<0>(values, atEnd); + } + + template <unsigned I> + static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::false_type) { + using EltType = typename std::tuple_element<I, Tuple>::type; + std::integral_constant<bool, I + 1 == sizeof...(Ts)> atEnd; + return DenseMapInfo<EltType>::isEqual(std::get<I>(lhs), std::get<I>(rhs)) && + isEqualImpl<I + 1>(lhs, rhs, atEnd); + } + + template <unsigned I> + static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::true_type) { + return true; + } + + static bool isEqual(const Tuple &lhs, const Tuple &rhs) { + std::integral_constant<bool, 0 == sizeof...(Ts)> atEnd; + return isEqualImpl<0>(lhs, rhs, atEnd); + } +}; + // Provide DenseMapInfo for StringRefs. 
template <> struct DenseMapInfo<StringRef> { static inline StringRef getEmptyKey() { @@ -280,21 +347,6 @@ template <> struct DenseMapInfo<hash_code> { static bool isEqual(hash_code LHS, hash_code RHS) { return LHS == RHS; } }; -template <> struct DenseMapInfo<ElementCount> { - static inline ElementCount getEmptyKey() { return {~0U, true}; } - static inline ElementCount getTombstoneKey() { return {~0U - 1, false}; } - static unsigned getHashValue(const ElementCount& EltCnt) { - if (EltCnt.Scalable) - return (EltCnt.Min * 37U) - 1U; - - return EltCnt.Min * 37U; - } - - static bool isEqual(const ElementCount& LHS, const ElementCount& RHS) { - return LHS == RHS; - } -}; - } // end namespace llvm #endif // LLVM_ADT_DENSEMAPINFO_H diff --git a/llvm/include/llvm/ADT/DenseSet.h b/llvm/include/llvm/ADT/DenseSet.h index 9afb715ae1db..07edc3d8e4ec 100644 --- a/llvm/include/llvm/ADT/DenseSet.h +++ b/llvm/include/llvm/ADT/DenseSet.h @@ -66,6 +66,12 @@ public: explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {} + template <typename InputIt> + DenseSetImpl(const InputIt &I, const InputIt &E) + : DenseSetImpl(PowerOf2Ceil(std::distance(I, E))) { + insert(I, E); + } + DenseSetImpl(std::initializer_list<ValueT> Elems) : DenseSetImpl(PowerOf2Ceil(Elems.size())) { insert(Elems.begin(), Elems.end()); diff --git a/llvm/include/llvm/ADT/EnumeratedArray.h b/llvm/include/llvm/ADT/EnumeratedArray.h index a9528115618c..a66ec9d08c37 100644 --- a/llvm/include/llvm/ADT/EnumeratedArray.h +++ b/llvm/include/llvm/ADT/EnumeratedArray.h @@ -38,6 +38,7 @@ public: static_cast<const EnumeratedArray<ValueType, Enumeration, LargestEnum, IndexType, Size> &>(*this)[Index]); } + inline IndexType size() { return Size; } private: ValueType Underlying[Size]; diff --git a/llvm/include/llvm/ADT/FloatingPointMode.h b/llvm/include/llvm/ADT/FloatingPointMode.h index 670b2368da9f..3ba8ae1b2855 100644 --- a/llvm/include/llvm/ADT/FloatingPointMode.h +++ 
b/llvm/include/llvm/ADT/FloatingPointMode.h @@ -14,28 +14,123 @@ #define LLVM_FLOATINGPOINTMODE_H #include "llvm/ADT/StringSwitch.h" +#include "llvm/Support/raw_ostream.h" namespace llvm { -/// Represent handled modes for denormal (aka subnormal) modes in the floating -/// point environment. -enum class DenormalMode { - Invalid = -1, +/// Rounding mode. +/// +/// Enumerates supported rounding modes, as well as some special values. The set +/// of the modes must agree with IEEE-754, 4.3.1 and 4.3.2. The constants +/// assigned to the IEEE rounding modes must agree with the values used by +/// FLT_ROUNDS (C11, 5.2.4.2.2p8). +/// +/// This value is packed into bitfield in some cases, including \c FPOptions, so +/// the rounding mode values and the special value \c Dynamic must fit into the +/// the bit field (now - 3 bits). The value \c Invalid is used only in values +/// returned by intrinsics to indicate errors, it should never be stored as +/// rounding mode value, so it does not need to fit the bit fields. +/// +enum class RoundingMode : int8_t { + // Rounding mode defined in IEEE-754. + TowardZero = 0, ///< roundTowardZero. + NearestTiesToEven = 1, ///< roundTiesToEven. + TowardPositive = 2, ///< roundTowardPositive. + TowardNegative = 3, ///< roundTowardNegative. + NearestTiesToAway = 4, ///< roundTiesToAway. - /// IEEE-754 denormal numbers preserved. - IEEE, + // Special values. + Dynamic = 7, ///< Denotes mode unknown at compile time. + Invalid = -1 ///< Denotes invalid value. +}; + +/// Represent subnormal handling kind for floating point instruction inputs and +/// outputs. +struct DenormalMode { + /// Represent handled modes for denormal (aka subnormal) modes in the floating + /// point environment. + enum DenormalModeKind : int8_t { + Invalid = -1, + + /// IEEE-754 denormal numbers preserved. + IEEE, + + /// The sign of a flushed-to-zero number is preserved in the sign of 0 + PreserveSign, + + /// Denormals are flushed to positive zero. 
+ PositiveZero + }; - /// The sign of a flushed-to-zero number is preserved in the sign of 0 - PreserveSign, + /// Denormal flushing mode for floating point instruction results in the + /// default floating point environment. + DenormalModeKind Output = DenormalModeKind::Invalid; + + /// Denormal treatment kind for floating point instruction inputs in the + /// default floating-point environment. If this is not DenormalModeKind::IEEE, + /// floating-point instructions implicitly treat the input value as 0. + DenormalModeKind Input = DenormalModeKind::Invalid; + + constexpr DenormalMode() = default; + constexpr DenormalMode(DenormalModeKind Out, DenormalModeKind In) : + Output(Out), Input(In) {} + + + static constexpr DenormalMode getInvalid() { + return DenormalMode(DenormalModeKind::Invalid, DenormalModeKind::Invalid); + } - /// Denormals are flushed to positive zero. - PositiveZero + static constexpr DenormalMode getIEEE() { + return DenormalMode(DenormalModeKind::IEEE, DenormalModeKind::IEEE); + } + + static constexpr DenormalMode getPreserveSign() { + return DenormalMode(DenormalModeKind::PreserveSign, + DenormalModeKind::PreserveSign); + } + + static constexpr DenormalMode getPositiveZero() { + return DenormalMode(DenormalModeKind::PositiveZero, + DenormalModeKind::PositiveZero); + } + + bool operator==(DenormalMode Other) const { + return Output == Other.Output && Input == Other.Input; + } + + bool operator!=(DenormalMode Other) const { + return !(*this == Other); + } + + bool isSimple() const { + return Input == Output; + } + + bool isValid() const { + return Output != DenormalModeKind::Invalid && + Input != DenormalModeKind::Invalid; + } + + inline void print(raw_ostream &OS) const; + + inline std::string str() const { + std::string storage; + raw_string_ostream OS(storage); + print(OS); + return OS.str(); + } }; +inline raw_ostream& operator<<(raw_ostream &OS, DenormalMode Mode) { + Mode.print(OS); + return OS; +} + /// Parse the expected names from the 
denormal-fp-math attribute. -inline DenormalMode parseDenormalFPAttribute(StringRef Str) { +inline DenormalMode::DenormalModeKind +parseDenormalFPAttributeComponent(StringRef Str) { // Assume ieee on unspecified attribute. - return StringSwitch<DenormalMode>(Str) + return StringSwitch<DenormalMode::DenormalModeKind>(Str) .Cases("", "ieee", DenormalMode::IEEE) .Case("preserve-sign", DenormalMode::PreserveSign) .Case("positive-zero", DenormalMode::PositiveZero) @@ -44,7 +139,7 @@ inline DenormalMode parseDenormalFPAttribute(StringRef Str) { /// Return the name used for the denormal handling mode used by the the /// expected names from the denormal-fp-math attribute. -inline StringRef denormalModeName(DenormalMode Mode) { +inline StringRef denormalModeKindName(DenormalMode::DenormalModeKind Mode) { switch (Mode) { case DenormalMode::IEEE: return "ieee"; @@ -57,6 +152,26 @@ inline StringRef denormalModeName(DenormalMode Mode) { } } +/// Returns the denormal mode to use for inputs and outputs. +inline DenormalMode parseDenormalFPAttribute(StringRef Str) { + StringRef OutputStr, InputStr; + std::tie(OutputStr, InputStr) = Str.split(','); + + DenormalMode Mode; + Mode.Output = parseDenormalFPAttributeComponent(OutputStr); + + // Maintain compatability with old form of the attribute which only specified + // one component. + Mode.Input = InputStr.empty() ? Mode.Output : + parseDenormalFPAttributeComponent(InputStr); + + return Mode; +} + +void DenormalMode::print(raw_ostream &OS) const { + OS << denormalModeKindName(Output) << ',' << denormalModeKindName(Input); +} + } #endif // LLVM_FLOATINGPOINTMODE_H diff --git a/llvm/include/llvm/ADT/FoldingSet.h b/llvm/include/llvm/ADT/FoldingSet.h index 4968b1ea7780..fb1cb03a4b5c 100644 --- a/llvm/include/llvm/ADT/FoldingSet.h +++ b/llvm/include/llvm/ADT/FoldingSet.h @@ -110,8 +110,6 @@ class StringRef; /// back to the bucket to facilitate node removal. 
/// class FoldingSetBase { - virtual void anchor(); // Out of line virtual method. - protected: /// Buckets - Array of bucket chains. void **Buckets; @@ -154,11 +152,6 @@ public: /// empty - Returns true if there are no nodes in the folding set. bool empty() const { return NumNodes == 0; } - /// reserve - Increase the number of buckets such that adding the - /// EltCount-th node won't cause a rebucket operation. reserve is permitted - /// to allocate more space than requested by EltCount. - void reserve(unsigned EltCount); - /// capacity - Returns the number of nodes permitted in the folding set /// before a rebucket operation is performed. unsigned capacity() { @@ -167,32 +160,46 @@ public: return NumBuckets * 2; } +protected: + /// Functions provided by the derived class to compute folding properties. + /// This is effectively a vtable for FoldingSetBase, except that we don't + /// actually store a pointer to it in the object. + struct FoldingSetInfo { + /// GetNodeProfile - Instantiations of the FoldingSet template implement + /// this function to gather data bits for the given node. + void (*GetNodeProfile)(const FoldingSetBase *Self, Node *N, + FoldingSetNodeID &ID); + + /// NodeEquals - Instantiations of the FoldingSet template implement + /// this function to compare the given node with the given ID. + bool (*NodeEquals)(const FoldingSetBase *Self, Node *N, + const FoldingSetNodeID &ID, unsigned IDHash, + FoldingSetNodeID &TempID); + + /// ComputeNodeHash - Instantiations of the FoldingSet template implement + /// this function to compute a hash value for the given node. + unsigned (*ComputeNodeHash)(const FoldingSetBase *Self, Node *N, + FoldingSetNodeID &TempID); + }; + private: /// GrowHashTable - Double the size of the hash table and rehash everything. - void GrowHashTable(); + void GrowHashTable(const FoldingSetInfo &Info); /// GrowBucketCount - resize the hash table and rehash everything. 
/// NewBucketCount must be a power of two, and must be greater than the old /// bucket count. - void GrowBucketCount(unsigned NewBucketCount); + void GrowBucketCount(unsigned NewBucketCount, const FoldingSetInfo &Info); protected: - /// GetNodeProfile - Instantiations of the FoldingSet template implement - /// this function to gather data bits for the given node. - virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0; - - /// NodeEquals - Instantiations of the FoldingSet template implement - /// this function to compare the given node with the given ID. - virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash, - FoldingSetNodeID &TempID) const=0; - - /// ComputeNodeHash - Instantiations of the FoldingSet template implement - /// this function to compute a hash value for the given node. - virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0; - // The below methods are protected to encourage subclasses to provide a more // type-safe API. + /// reserve - Increase the number of buckets such that adding the + /// EltCount-th node won't cause a rebucket operation. reserve is permitted + /// to allocate more space than requested by EltCount. + void reserve(unsigned EltCount, const FoldingSetInfo &Info); + /// RemoveNode - Remove a node from the folding set, returning true if one /// was removed or false if the node was not in the folding set. bool RemoveNode(Node *N); @@ -200,17 +207,18 @@ protected: /// GetOrInsertNode - If there is an existing simple Node exactly /// equal to the specified node, return it. Otherwise, insert 'N' and return /// it instead. - Node *GetOrInsertNode(Node *N); + Node *GetOrInsertNode(Node *N, const FoldingSetInfo &Info); /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists, /// return it. If not, return the insertion token that will make insertion /// faster. 
- Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos); + Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos, + const FoldingSetInfo &Info); /// InsertNode - Insert the specified node into the folding set, knowing that /// it is not already in the folding set. InsertPos must be obtained from /// FindNodeOrInsertPos. - void InsertNode(Node *N, void *InsertPos); + void InsertNode(Node *N, void *InsertPos, const FoldingSetInfo &Info); }; //===----------------------------------------------------------------------===// @@ -397,7 +405,7 @@ DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X, //===----------------------------------------------------------------------===// /// FoldingSetImpl - An implementation detail that lets us share code between /// FoldingSet and ContextualFoldingSet. -template <class T> class FoldingSetImpl : public FoldingSetBase { +template <class Derived, class T> class FoldingSetImpl : public FoldingSetBase { protected: explicit FoldingSetImpl(unsigned Log2InitSize) : FoldingSetBase(Log2InitSize) {} @@ -427,29 +435,40 @@ public: return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true); } + /// reserve - Increase the number of buckets such that adding the + /// EltCount-th node won't cause a rebucket operation. reserve is permitted + /// to allocate more space than requested by EltCount. + void reserve(unsigned EltCount) { + return FoldingSetBase::reserve(EltCount, Derived::getFoldingSetInfo()); + } + /// RemoveNode - Remove a node from the folding set, returning true if one /// was removed or false if the node was not in the folding set. - bool RemoveNode(T *N) { return FoldingSetBase::RemoveNode(N); } + bool RemoveNode(T *N) { + return FoldingSetBase::RemoveNode(N); + } /// GetOrInsertNode - If there is an existing simple Node exactly /// equal to the specified node, return it. Otherwise, insert 'N' and /// return it instead. 
T *GetOrInsertNode(T *N) { - return static_cast<T *>(FoldingSetBase::GetOrInsertNode(N)); + return static_cast<T *>( + FoldingSetBase::GetOrInsertNode(N, Derived::getFoldingSetInfo())); } /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists, /// return it. If not, return the insertion token that will make insertion /// faster. T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) { - return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos(ID, InsertPos)); + return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos( + ID, InsertPos, Derived::getFoldingSetInfo())); } /// InsertNode - Insert the specified node into the folding set, knowing that /// it is not already in the folding set. InsertPos must be obtained from /// FindNodeOrInsertPos. void InsertNode(T *N, void *InsertPos) { - FoldingSetBase::InsertNode(N, InsertPos); + FoldingSetBase::InsertNode(N, InsertPos, Derived::getFoldingSetInfo()); } /// InsertNode - Insert the specified node into the folding set, knowing that @@ -470,32 +489,43 @@ public: /// moved-from state is not a valid state for anything other than /// move-assigning and destroying. This is primarily to enable movable APIs /// that incorporate these objects. -template <class T> class FoldingSet final : public FoldingSetImpl<T> { - using Super = FoldingSetImpl<T>; +template <class T> +class FoldingSet : public FoldingSetImpl<FoldingSet<T>, T> { + using Super = FoldingSetImpl<FoldingSet, T>; using Node = typename Super::Node; - /// GetNodeProfile - Each instantiatation of the FoldingSet needs to provide a + /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a /// way to convert nodes into a unique specifier. 
- void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override { + static void GetNodeProfile(const FoldingSetBase *, Node *N, + FoldingSetNodeID &ID) { T *TN = static_cast<T *>(N); FoldingSetTrait<T>::Profile(*TN, ID); } /// NodeEquals - Instantiations may optionally provide a way to compare a /// node with a specified ID. - bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash, - FoldingSetNodeID &TempID) const override { + static bool NodeEquals(const FoldingSetBase *, Node *N, + const FoldingSetNodeID &ID, unsigned IDHash, + FoldingSetNodeID &TempID) { T *TN = static_cast<T *>(N); return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID); } /// ComputeNodeHash - Instantiations may optionally provide a way to compute a /// hash value directly from a node. - unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override { + static unsigned ComputeNodeHash(const FoldingSetBase *, Node *N, + FoldingSetNodeID &TempID) { T *TN = static_cast<T *>(N); return FoldingSetTrait<T>::ComputeHash(*TN, TempID); } + static const FoldingSetBase::FoldingSetInfo &getFoldingSetInfo() { + static constexpr FoldingSetBase::FoldingSetInfo Info = { + GetNodeProfile, NodeEquals, ComputeNodeHash}; + return Info; + } + friend Super; + public: explicit FoldingSet(unsigned Log2InitSize = 6) : Super(Log2InitSize) {} FoldingSet(FoldingSet &&Arg) = default; @@ -512,35 +542,51 @@ public: /// function with signature /// void Profile(FoldingSetNodeID &, Ctx); template <class T, class Ctx> -class ContextualFoldingSet final : public FoldingSetImpl<T> { +class ContextualFoldingSet + : public FoldingSetImpl<ContextualFoldingSet<T, Ctx>, T> { // Unfortunately, this can't derive from FoldingSet<T> because the // construction of the vtable for FoldingSet<T> requires // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn // requires a single-argument T::Profile(). 
- using Super = FoldingSetImpl<T>; + using Super = FoldingSetImpl<ContextualFoldingSet, T>; using Node = typename Super::Node; Ctx Context; + static const Ctx &getContext(const FoldingSetBase *Base) { + return static_cast<const ContextualFoldingSet*>(Base)->Context; + } + /// GetNodeProfile - Each instantiatation of the FoldingSet needs to provide a /// way to convert nodes into a unique specifier. - void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override { + static void GetNodeProfile(const FoldingSetBase *Base, Node *N, + FoldingSetNodeID &ID) { T *TN = static_cast<T *>(N); - ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context); + ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, getContext(Base)); } - bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash, - FoldingSetNodeID &TempID) const override { + static bool NodeEquals(const FoldingSetBase *Base, Node *N, + const FoldingSetNodeID &ID, unsigned IDHash, + FoldingSetNodeID &TempID) { T *TN = static_cast<T *>(N); return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID, - Context); + getContext(Base)); } - unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override { + static unsigned ComputeNodeHash(const FoldingSetBase *Base, Node *N, + FoldingSetNodeID &TempID) { T *TN = static_cast<T *>(N); - return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context); + return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, + getContext(Base)); + } + + static const FoldingSetBase::FoldingSetInfo &getFoldingSetInfo() { + static constexpr FoldingSetBase::FoldingSetInfo Info = { + GetNodeProfile, NodeEquals, ComputeNodeHash}; + return Info; } + friend Super; public: explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6) diff --git a/llvm/include/llvm/ADT/FunctionExtras.h b/llvm/include/llvm/ADT/FunctionExtras.h index 121aa527a5da..4c75e4d2547b 100644 --- a/llvm/include/llvm/ADT/FunctionExtras.h +++ 
b/llvm/include/llvm/ADT/FunctionExtras.h @@ -11,11 +11,11 @@ /// in `<function>`. /// /// It provides `unique_function`, which works like `std::function` but supports -/// move-only callable objects. +/// move-only callable objects and const-qualification. /// /// Future plans: -/// - Add a `function` that provides const, volatile, and ref-qualified support, -/// which doesn't work with `std::function`. +/// - Add a `function` that provides ref-qualified support, which doesn't work +/// with `std::function`. /// - Provide support for specifying multiple signatures to type erase callable /// objects with an overload set, such as those produced by generic lambdas. /// - Expand to include a copyable utility that directly replaces std::function @@ -34,15 +34,34 @@ #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/PointerUnion.h" +#include "llvm/Support/MemAlloc.h" #include "llvm/Support/type_traits.h" #include <memory> +#include <type_traits> namespace llvm { +/// unique_function is a type-erasing functor similar to std::function. +/// +/// It can hold move-only function objects, like lambdas capturing unique_ptrs. +/// Accordingly, it is movable but not copyable. +/// +/// It supports const-qualification: +/// - unique_function<int() const> has a const operator(). +/// It can only hold functions which themselves have a const operator(). +/// - unique_function<int()> has a non-const operator(). +/// It can hold functions with a non-const operator(), like mutable lambdas. template <typename FunctionT> class unique_function; -template <typename ReturnT, typename... ParamTs> -class unique_function<ReturnT(ParamTs...)> { +namespace detail { + +template <typename T> +using EnableIfTrivial = + std::enable_if_t<llvm::is_trivially_move_constructible<T>::value && + std::is_trivially_destructible<T>::value>; + +template <typename ReturnT, typename... 
ParamTs> class UniqueFunctionBase { +protected: static constexpr size_t InlineStorageSize = sizeof(void *) * 3; // MSVC has a bug and ICEs if we give it a particular dependent value @@ -112,8 +131,11 @@ class unique_function<ReturnT(ParamTs...)> { // For in-line storage, we just provide an aligned character buffer. We // provide three pointers worth of storage here. - typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type - InlineStorage; + // This is mutable as an inlined `const unique_function<void() const>` may + // still modify its own mutable members. + mutable + typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type + InlineStorage; } StorageUnion; // A compressed pointer to either our dispatching callback or our table of @@ -136,11 +158,25 @@ class unique_function<ReturnT(ParamTs...)> { .template get<NonTrivialCallbacks *>(); } - void *getInlineStorage() { return &StorageUnion.InlineStorage; } + CallPtrT getCallPtr() const { + return isTrivialCallback() ? getTrivialCallback() + : getNonTrivialCallbacks()->CallPtr; + } - void *getOutOfLineStorage() { + // These three functions are only const in the narrow sense. They return + // mutable pointers to function state. + // This allows unique_function<T const>::operator() to be const, even if the + // underlying functor may be internally mutable. + // + // const callers must ensure they're only used in const-correct ways. + void *getCalleePtr() const { + return isInlineStorage() ? 
getInlineStorage() : getOutOfLineStorage(); + } + void *getInlineStorage() const { return &StorageUnion.InlineStorage; } + void *getOutOfLineStorage() const { return StorageUnion.OutOfLineStorage.StoragePtr; } + size_t getOutOfLineStorageSize() const { return StorageUnion.OutOfLineStorage.Size; } @@ -152,10 +188,11 @@ class unique_function<ReturnT(ParamTs...)> { StorageUnion.OutOfLineStorage = {Ptr, Size, Alignment}; } - template <typename CallableT> - static ReturnT CallImpl(void *CallableAddr, AdjustedParamT<ParamTs>... Params) { - return (*reinterpret_cast<CallableT *>(CallableAddr))( - std::forward<ParamTs>(Params)...); + template <typename CalledAsT> + static ReturnT CallImpl(void *CallableAddr, + AdjustedParamT<ParamTs>... Params) { + auto &Func = *reinterpret_cast<CalledAsT *>(CallableAddr); + return Func(std::forward<ParamTs>(Params)...); } template <typename CallableT> @@ -169,11 +206,54 @@ class unique_function<ReturnT(ParamTs...)> { reinterpret_cast<CallableT *>(CallableAddr)->~CallableT(); } -public: - unique_function() = default; - unique_function(std::nullptr_t /*null_callable*/) {} + // The pointers to call/move/destroy functions are determined for each + // callable type (and called-as type, which determines the overload chosen). + // (definitions are out-of-line). + + // By default, we need an object that contains all the different + // type erased behaviors needed. Create a static instance of the struct type + // here and each instance will contain a pointer to it. + // Wrap in a struct to avoid https://gcc.gnu.org/PR71954 + template <typename CallableT, typename CalledAs, typename Enable = void> + struct CallbacksHolder { + static NonTrivialCallbacks Callbacks; + }; + // See if we can create a trivial callback. We need the callable to be + // trivially moved and trivially destroyed so that we don't have to store + // type erased callbacks for those operations. 
+ template <typename CallableT, typename CalledAs> + struct CallbacksHolder<CallableT, CalledAs, EnableIfTrivial<CallableT>> { + static TrivialCallback Callbacks; + }; + + // A simple tag type so the call-as type to be passed to the constructor. + template <typename T> struct CalledAs {}; + + // Essentially the "main" unique_function constructor, but subclasses + // provide the qualified type to be used for the call. + // (We always store a T, even if the call will use a pointer to const T). + template <typename CallableT, typename CalledAsT> + UniqueFunctionBase(CallableT Callable, CalledAs<CalledAsT>) { + bool IsInlineStorage = true; + void *CallableAddr = getInlineStorage(); + if (sizeof(CallableT) > InlineStorageSize || + alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) { + IsInlineStorage = false; + // Allocate out-of-line storage. FIXME: Use an explicit alignment + // parameter in C++17 mode. + auto Size = sizeof(CallableT); + auto Alignment = alignof(CallableT); + CallableAddr = allocate_buffer(Size, Alignment); + setOutOfLineStorage(CallableAddr, Size, Alignment); + } + + // Now move into the storage. + new (CallableAddr) CallableT(std::move(Callable)); + CallbackAndInlineFlag.setPointerAndInt( + &CallbacksHolder<CallableT, CalledAsT>::Callbacks, IsInlineStorage); + } - ~unique_function() { + ~UniqueFunctionBase() { if (!CallbackAndInlineFlag.getPointer()) return; @@ -189,7 +269,7 @@ public: getOutOfLineStorageAlignment()); } - unique_function(unique_function &&RHS) noexcept { + UniqueFunctionBase(UniqueFunctionBase &&RHS) noexcept { // Copy the callback and inline flag. 
CallbackAndInlineFlag = RHS.CallbackAndInlineFlag; @@ -218,72 +298,83 @@ public: #endif } - unique_function &operator=(unique_function &&RHS) noexcept { + UniqueFunctionBase &operator=(UniqueFunctionBase &&RHS) noexcept { if (this == &RHS) return *this; // Because we don't try to provide any exception safety guarantees we can // implement move assignment very simply by first destroying the current // object and then move-constructing over top of it. - this->~unique_function(); - new (this) unique_function(std::move(RHS)); + this->~UniqueFunctionBase(); + new (this) UniqueFunctionBase(std::move(RHS)); return *this; } - template <typename CallableT> unique_function(CallableT Callable) { - bool IsInlineStorage = true; - void *CallableAddr = getInlineStorage(); - if (sizeof(CallableT) > InlineStorageSize || - alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) { - IsInlineStorage = false; - // Allocate out-of-line storage. FIXME: Use an explicit alignment - // parameter in C++17 mode. - auto Size = sizeof(CallableT); - auto Alignment = alignof(CallableT); - CallableAddr = allocate_buffer(Size, Alignment); - setOutOfLineStorage(CallableAddr, Size, Alignment); - } + UniqueFunctionBase() = default; - // Now move into the storage. - new (CallableAddr) CallableT(std::move(Callable)); +public: + explicit operator bool() const { + return (bool)CallbackAndInlineFlag.getPointer(); + } +}; - // See if we can create a trivial callback. We need the callable to be - // trivially moved and trivially destroyed so that we don't have to store - // type erased callbacks for those operations. - // - // FIXME: We should use constexpr if here and below to avoid instantiating - // the non-trivial static objects when unnecessary. While the linker should - // remove them, it is still wasteful. - if (llvm::is_trivially_move_constructible<CallableT>::value && - std::is_trivially_destructible<CallableT>::value) { - // We need to create a nicely aligned object. 
We use a static variable - // for this because it is a trivial struct. - static TrivialCallback Callback = { &CallImpl<CallableT> }; - - CallbackAndInlineFlag = {&Callback, IsInlineStorage}; - return; - } +template <typename R, typename... P> +template <typename CallableT, typename CalledAsT, typename Enable> +typename UniqueFunctionBase<R, P...>::NonTrivialCallbacks UniqueFunctionBase< + R, P...>::CallbacksHolder<CallableT, CalledAsT, Enable>::Callbacks = { + &CallImpl<CalledAsT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>}; - // Otherwise, we need to point at an object that contains all the different - // type erased behaviors needed. Create a static instance of the struct type - // here and then use a pointer to that. - static NonTrivialCallbacks Callbacks = { - &CallImpl<CallableT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>}; +template <typename R, typename... P> +template <typename CallableT, typename CalledAsT> +typename UniqueFunctionBase<R, P...>::TrivialCallback + UniqueFunctionBase<R, P...>::CallbacksHolder< + CallableT, CalledAsT, EnableIfTrivial<CallableT>>::Callbacks{ + &CallImpl<CalledAsT>}; - CallbackAndInlineFlag = {&Callbacks, IsInlineStorage}; - } +} // namespace detail + +template <typename R, typename... P> +class unique_function<R(P...)> : public detail::UniqueFunctionBase<R, P...> { + using Base = detail::UniqueFunctionBase<R, P...>; + +public: + unique_function() = default; + unique_function(std::nullptr_t) {} + unique_function(unique_function &&) = default; + unique_function(const unique_function &) = delete; + unique_function &operator=(unique_function &&) = default; + unique_function &operator=(const unique_function &) = delete; - ReturnT operator()(ParamTs... Params) { - void *CallableAddr = - isInlineStorage() ? 
getInlineStorage() : getOutOfLineStorage(); + template <typename CallableT> + unique_function(CallableT Callable) + : Base(std::forward<CallableT>(Callable), + typename Base::template CalledAs<CallableT>{}) {} - return (isTrivialCallback() - ? getTrivialCallback() - : getNonTrivialCallbacks()->CallPtr)(CallableAddr, Params...); + R operator()(P... Params) { + return this->getCallPtr()(this->getCalleePtr(), Params...); } +}; - explicit operator bool() const { - return (bool)CallbackAndInlineFlag.getPointer(); +template <typename R, typename... P> +class unique_function<R(P...) const> + : public detail::UniqueFunctionBase<R, P...> { + using Base = detail::UniqueFunctionBase<R, P...>; + +public: + unique_function() = default; + unique_function(std::nullptr_t) {} + unique_function(unique_function &&) = default; + unique_function(const unique_function &) = delete; + unique_function &operator=(unique_function &&) = default; + unique_function &operator=(const unique_function &) = delete; + + template <typename CallableT> + unique_function(CallableT Callable) + : Base(std::forward<CallableT>(Callable), + typename Base::template CalledAs<const CallableT>{}) {} + + R operator()(P... Params) const { + return this->getCallPtr()(this->getCalleePtr(), Params...); } }; diff --git a/llvm/include/llvm/ADT/Hashing.h b/llvm/include/llvm/ADT/Hashing.h index adcc5cf54da9..9ee310c879fd 100644 --- a/llvm/include/llvm/ADT/Hashing.h +++ b/llvm/include/llvm/ADT/Hashing.h @@ -101,8 +101,7 @@ public: /// differing argument types even if they would implicit promote to a common /// type without changing the value. template <typename T> -typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type -hash_value(T value); +std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value); /// Compute a hash_code for a pointer's address. /// @@ -158,10 +157,10 @@ inline uint32_t fetch32(const char *p) { } /// Some primes between 2^63 and 2^64 for various uses. 
-static const uint64_t k0 = 0xc3a5c85c97cb3127ULL; -static const uint64_t k1 = 0xb492b66fbe98f273ULL; -static const uint64_t k2 = 0x9ae16a3b2f90404fULL; -static const uint64_t k3 = 0xc949d7c7509e6557ULL; +static constexpr uint64_t k0 = 0xc3a5c85c97cb3127ULL; +static constexpr uint64_t k1 = 0xb492b66fbe98f273ULL; +static constexpr uint64_t k2 = 0x9ae16a3b2f90404fULL; +static constexpr uint64_t k3 = 0xc949d7c7509e6557ULL; /// Bitwise right rotate. /// Normally this will compile to a single instruction, especially if the @@ -360,7 +359,7 @@ template <typename T, typename U> struct is_hashable_data<std::pair<T, U> > /// Helper to get the hashable data representation for a type. /// This variant is enabled when the type itself can be used. template <typename T> -typename std::enable_if<is_hashable_data<T>::value, T>::type +std::enable_if_t<is_hashable_data<T>::value, T> get_hashable_data(const T &value) { return value; } @@ -368,7 +367,7 @@ get_hashable_data(const T &value) { /// This variant is enabled when we must first call hash_value and use the /// result as our data. template <typename T> -typename std::enable_if<!is_hashable_data<T>::value, size_t>::type +std::enable_if_t<!is_hashable_data<T>::value, size_t> get_hashable_data(const T &value) { using ::llvm::hash_value; return hash_value(value); @@ -442,7 +441,7 @@ hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) { /// are stored in contiguous memory, this routine avoids copying each value /// and directly reads from the underlying memory. 
template <typename ValueT> -typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type +std::enable_if_t<is_hashable_data<ValueT>::value, hash_code> hash_combine_range_impl(ValueT *first, ValueT *last) { const uint64_t seed = get_execution_seed(); const char *s_begin = reinterpret_cast<const char *>(first); @@ -627,8 +626,7 @@ inline hash_code hash_integer_value(uint64_t value) { // Declared and documented above, but defined here so that any of the hashing // infrastructure is available. template <typename T> -typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type -hash_value(T value) { +std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value) { return ::llvm::hashing::detail::hash_integer_value( static_cast<uint64_t>(value)); } diff --git a/llvm/include/llvm/ADT/ImmutableMap.h b/llvm/include/llvm/ADT/ImmutableMap.h index 86fd7fefaec3..30689d2274a8 100644 --- a/llvm/include/llvm/ADT/ImmutableMap.h +++ b/llvm/include/llvm/ADT/ImmutableMap.h @@ -70,33 +70,14 @@ public: using TreeTy = ImutAVLTree<ValInfo>; protected: - TreeTy* Root; + IntrusiveRefCntPtr<TreeTy> Root; public: /// Constructs a map from a pointer to a tree root. In general one /// should use a Factory object to create maps instead of directly /// invoking the constructor, but there are cases where make this /// constructor public is useful. 
- explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) { - if (Root) { Root->retain(); } - } - - ImmutableMap(const ImmutableMap &X) : Root(X.Root) { - if (Root) { Root->retain(); } - } - - ~ImmutableMap() { - if (Root) { Root->release(); } - } - - ImmutableMap &operator=(const ImmutableMap &X) { - if (Root != X.Root) { - if (X.Root) { X.Root->retain(); } - if (Root) { Root->release(); } - Root = X.Root; - } - return *this; - } + explicit ImmutableMap(const TreeTy *R) : Root(const_cast<TreeTy *>(R)) {} class Factory { typename TreeTy::Factory F; @@ -115,12 +96,12 @@ public: LLVM_NODISCARD ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) { - TreeTy *T = F.add(Old.Root, std::pair<key_type,data_type>(K,D)); + TreeTy *T = F.add(Old.Root.get(), std::pair<key_type, data_type>(K, D)); return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T); } LLVM_NODISCARD ImmutableMap remove(ImmutableMap Old, key_type_ref K) { - TreeTy *T = F.remove(Old.Root,K); + TreeTy *T = F.remove(Old.Root.get(), K); return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T); } @@ -134,19 +115,20 @@ public: } bool operator==(const ImmutableMap &RHS) const { - return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; + return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root; } bool operator!=(const ImmutableMap &RHS) const { - return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; + return Root && RHS.Root ? 
Root->isNotEqual(*RHS.Root.get()) + : Root != RHS.Root; } TreeTy *getRoot() const { if (Root) { Root->retain(); } - return Root; + return Root.get(); } - TreeTy *getRootWithoutRetain() const { return Root; } + TreeTy *getRootWithoutRetain() const { return Root.get(); } void manualRetain() { if (Root) Root->retain(); @@ -217,7 +199,7 @@ public: data_type_ref getData() const { return (*this)->second; } }; - iterator begin() const { return iterator(Root); } + iterator begin() const { return iterator(Root.get()); } iterator end() const { return iterator(); } data_type* lookup(key_type_ref K) const { @@ -243,7 +225,7 @@ public: unsigned getHeight() const { return Root ? Root->getHeight() : 0; } static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) { - ID.AddPointer(M.Root); + ID.AddPointer(M.Root.get()); } inline void Profile(FoldingSetNodeID& ID) const { @@ -266,7 +248,7 @@ public: using FactoryTy = typename TreeTy::Factory; protected: - TreeTy *Root; + IntrusiveRefCntPtr<TreeTy> Root; FactoryTy *Factory; public: @@ -274,44 +256,12 @@ public: /// should use a Factory object to create maps instead of directly /// invoking the constructor, but there are cases where make this /// constructor public is useful. 
- explicit ImmutableMapRef(const TreeTy *R, FactoryTy *F) - : Root(const_cast<TreeTy *>(R)), Factory(F) { - if (Root) { - Root->retain(); - } - } + ImmutableMapRef(const TreeTy *R, FactoryTy *F) + : Root(const_cast<TreeTy *>(R)), Factory(F) {} - explicit ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X, - typename ImmutableMap<KeyT, ValT>::Factory &F) - : Root(X.getRootWithoutRetain()), - Factory(F.getTreeFactory()) { - if (Root) { Root->retain(); } - } - - ImmutableMapRef(const ImmutableMapRef &X) : Root(X.Root), Factory(X.Factory) { - if (Root) { - Root->retain(); - } - } - - ~ImmutableMapRef() { - if (Root) - Root->release(); - } - - ImmutableMapRef &operator=(const ImmutableMapRef &X) { - if (Root != X.Root) { - if (X.Root) - X.Root->retain(); - - if (Root) - Root->release(); - - Root = X.Root; - Factory = X.Factory; - } - return *this; - } + ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X, + typename ImmutableMap<KeyT, ValT>::Factory &F) + : Root(X.getRootWithoutRetain()), Factory(F.getTreeFactory()) {} static inline ImmutableMapRef getEmptyMap(FactoryTy *F) { return ImmutableMapRef(0, F); @@ -326,12 +276,13 @@ public: } ImmutableMapRef add(key_type_ref K, data_type_ref D) const { - TreeTy *NewT = Factory->add(Root, std::pair<key_type, data_type>(K, D)); + TreeTy *NewT = + Factory->add(Root.get(), std::pair<key_type, data_type>(K, D)); return ImmutableMapRef(NewT, Factory); } ImmutableMapRef remove(key_type_ref K) const { - TreeTy *NewT = Factory->remove(Root, K); + TreeTy *NewT = Factory->remove(Root.get(), K); return ImmutableMapRef(NewT, Factory); } @@ -340,15 +291,16 @@ public: } ImmutableMap<KeyT, ValT> asImmutableMap() const { - return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root)); + return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root.get())); } bool operator==(const ImmutableMapRef &RHS) const { - return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; + return Root && RHS.Root ? 
Root->isEqual(*RHS.Root.get()) : Root == RHS.Root; } bool operator!=(const ImmutableMapRef &RHS) const { - return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; + return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get()) + : Root != RHS.Root; } bool isEmpty() const { return !Root; } @@ -377,7 +329,7 @@ public: data_type_ref getData() const { return (*this)->second; } }; - iterator begin() const { return iterator(Root); } + iterator begin() const { return iterator(Root.get()); } iterator end() const { return iterator(); } data_type *lookup(key_type_ref K) const { diff --git a/llvm/include/llvm/ADT/ImmutableSet.h b/llvm/include/llvm/ADT/ImmutableSet.h index a6a6abfd9600..f19913f8dcdd 100644 --- a/llvm/include/llvm/ADT/ImmutableSet.h +++ b/llvm/include/llvm/ADT/ImmutableSet.h @@ -15,6 +15,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/Support/Allocator.h" @@ -169,7 +170,7 @@ public: bool contains(key_type_ref K) { return (bool) find(K); } /// foreach - A member template the accepts invokes operator() on a functor - /// object (specifed by Callback) for every node/subtree in the tree. + /// object (specified by Callback) for every node/subtree in the tree. /// Nodes are visited using an inorder traversal. template <typename Callback> void foreach(Callback& C) { @@ -183,7 +184,7 @@ public: } /// validateTree - A utility method that checks that the balancing and - /// ordering invariants of the tree are satisifed. It is a recursive + /// ordering invariants of the tree are satisfied. It is a recursive /// method that returns the height of the tree, which is then consumed /// by the enclosing validateTree call. External callers should ignore the /// return value. 
An invalid tree will cause an assertion to fire in @@ -357,6 +358,12 @@ public: } }; +template <typename ImutInfo> +struct IntrusiveRefCntPtrInfo<ImutAVLTree<ImutInfo>> { + static void retain(ImutAVLTree<ImutInfo> *Tree) { Tree->retain(); } + static void release(ImutAVLTree<ImutInfo> *Tree) { Tree->release(); } +}; + //===----------------------------------------------------------------------===// // Immutable AVL-Tree Factory class. //===----------------------------------------------------------------------===// @@ -450,7 +457,7 @@ protected: //===--------------------------------------------------===// // "createNode" is used to generate new tree roots that link - // to other trees. The functon may also simply move links + // to other trees. The function may also simply move links // in an existing root if that root is still marked mutable. // This is necessary because otherwise our balancing code // would leak memory as it would create nodes that are @@ -961,33 +968,14 @@ public: using TreeTy = ImutAVLTree<ValInfo>; private: - TreeTy *Root; + IntrusiveRefCntPtr<TreeTy> Root; public: /// Constructs a set from a pointer to a tree root. In general one /// should use a Factory object to create sets instead of directly /// invoking the constructor, but there are cases where make this /// constructor public is useful. - explicit ImmutableSet(TreeTy* R) : Root(R) { - if (Root) { Root->retain(); } - } - - ImmutableSet(const ImmutableSet &X) : Root(X.Root) { - if (Root) { Root->retain(); } - } - - ~ImmutableSet() { - if (Root) { Root->release(); } - } - - ImmutableSet &operator=(const ImmutableSet &X) { - if (Root != X.Root) { - if (X.Root) { X.Root->retain(); } - if (Root) { Root->release(); } - Root = X.Root; - } - return *this; - } + explicit ImmutableSet(TreeTy *R) : Root(R) {} class Factory { typename TreeTy::Factory F; @@ -1016,7 +1004,7 @@ public: /// The memory allocated to represent the set is released when the /// factory object that created the set is destroyed. 
LLVM_NODISCARD ImmutableSet add(ImmutableSet Old, value_type_ref V) { - TreeTy *NewT = F.add(Old.Root, V); + TreeTy *NewT = F.add(Old.Root.get(), V); return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT); } @@ -1028,7 +1016,7 @@ public: /// The memory allocated to represent the set is released when the /// factory object that created the set is destroyed. LLVM_NODISCARD ImmutableSet remove(ImmutableSet Old, value_type_ref V) { - TreeTy *NewT = F.remove(Old.Root, V); + TreeTy *NewT = F.remove(Old.Root.get(), V); return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT); } @@ -1047,21 +1035,20 @@ public: } bool operator==(const ImmutableSet &RHS) const { - return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; + return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root; } bool operator!=(const ImmutableSet &RHS) const { - return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; + return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get()) + : Root != RHS.Root; } TreeTy *getRoot() { if (Root) { Root->retain(); } - return Root; + return Root.get(); } - TreeTy *getRootWithoutRetain() const { - return Root; - } + TreeTy *getRootWithoutRetain() const { return Root.get(); } /// isEmpty - Return true if the set contains no elements. bool isEmpty() const { return !Root; } @@ -1082,7 +1069,7 @@ public: using iterator = ImutAVLValueIterator<ImmutableSet>; - iterator begin() const { return iterator(Root); } + iterator begin() const { return iterator(Root.get()); } iterator end() const { return iterator(); } //===--------------------------------------------------===// @@ -1092,7 +1079,7 @@ public: unsigned getHeight() const { return Root ? 
Root->getHeight() : 0; } static void Profile(FoldingSetNodeID &ID, const ImmutableSet &S) { - ID.AddPointer(S.Root); + ID.AddPointer(S.Root.get()); } void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); } @@ -1114,7 +1101,7 @@ public: using FactoryTy = typename TreeTy::Factory; private: - TreeTy *Root; + IntrusiveRefCntPtr<TreeTy> Root; FactoryTy *Factory; public: @@ -1122,42 +1109,18 @@ public: /// should use a Factory object to create sets instead of directly /// invoking the constructor, but there are cases where make this /// constructor public is useful. - explicit ImmutableSetRef(TreeTy* R, FactoryTy *F) - : Root(R), - Factory(F) { - if (Root) { Root->retain(); } - } - - ImmutableSetRef(const ImmutableSetRef &X) - : Root(X.Root), - Factory(X.Factory) { - if (Root) { Root->retain(); } - } - - ~ImmutableSetRef() { - if (Root) { Root->release(); } - } - - ImmutableSetRef &operator=(const ImmutableSetRef &X) { - if (Root != X.Root) { - if (X.Root) { X.Root->retain(); } - if (Root) { Root->release(); } - Root = X.Root; - Factory = X.Factory; - } - return *this; - } + ImmutableSetRef(TreeTy *R, FactoryTy *F) : Root(R), Factory(F) {} static ImmutableSetRef getEmptySet(FactoryTy *F) { return ImmutableSetRef(0, F); } ImmutableSetRef add(value_type_ref V) { - return ImmutableSetRef(Factory->add(Root, V), Factory); + return ImmutableSetRef(Factory->add(Root.get(), V), Factory); } ImmutableSetRef remove(value_type_ref V) { - return ImmutableSetRef(Factory->remove(Root, V), Factory); + return ImmutableSetRef(Factory->remove(Root.get(), V), Factory); } /// Returns true if the set contains the specified value. @@ -1166,20 +1129,19 @@ public: } ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const { - return ImmutableSet<ValT>(canonicalize ? - Factory->getCanonicalTree(Root) : Root); + return ImmutableSet<ValT>( + canonicalize ? 
Factory->getCanonicalTree(Root.get()) : Root.get()); } - TreeTy *getRootWithoutRetain() const { - return Root; - } + TreeTy *getRootWithoutRetain() const { return Root.get(); } bool operator==(const ImmutableSetRef &RHS) const { - return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; + return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root; } bool operator!=(const ImmutableSetRef &RHS) const { - return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; + return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get()) + : Root != RHS.Root; } /// isEmpty - Return true if the set contains no elements. @@ -1195,7 +1157,7 @@ public: using iterator = ImutAVLValueIterator<ImmutableSetRef>; - iterator begin() const { return iterator(Root); } + iterator begin() const { return iterator(Root.get()); } iterator end() const { return iterator(); } //===--------------------------------------------------===// @@ -1205,7 +1167,7 @@ public: unsigned getHeight() const { return Root ? Root->getHeight() : 0; } static void Profile(FoldingSetNodeID &ID, const ImmutableSetRef &S) { - ID.AddPointer(S.Root); + ID.AddPointer(S.Root.get()); } void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); } diff --git a/llvm/include/llvm/ADT/IntervalMap.h b/llvm/include/llvm/ADT/IntervalMap.h index a02876ee77f3..db7804d0a551 100644 --- a/llvm/include/llvm/ADT/IntervalMap.h +++ b/llvm/include/llvm/ADT/IntervalMap.h @@ -491,7 +491,7 @@ class NodeRef { struct CacheAlignedPointerTraits { static inline void *getAsVoidPointer(void *P) { return P; } static inline void *getFromVoidPointer(void *P) { return P; } - enum { NumLowBitsAvailable = Log2CacheLine }; + static constexpr int NumLowBitsAvailable = Log2CacheLine; }; PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip; @@ -823,7 +823,7 @@ public: } /// reset - Reset cached information about node(Level) from subtree(Level -1). - /// @param Level 1..height. 
THe node to update after parent node changed. + /// @param Level 1..height. The node to update after parent node changed. void reset(unsigned Level) { path[Level] = Entry(subtree(Level - 1), offset(Level)); } @@ -884,7 +884,7 @@ public: } /// getLeftSibling - Get the left sibling node at Level, or a null NodeRef. - /// @param Level Get the sinbling to node(Level). + /// @param Level Get the sibling to node(Level). /// @return Left sibling, or NodeRef(). NodeRef getRightSibling(unsigned Level) const; @@ -1396,7 +1396,7 @@ public: setRoot(map->rootSize); } - /// preincrement - move to the next interval. + /// preincrement - Move to the next interval. const_iterator &operator++() { assert(valid() && "Cannot increment end()"); if (++path.leafOffset() == path.leafSize() && branched()) @@ -1404,14 +1404,14 @@ public: return *this; } - /// postincrement - Dont do that! + /// postincrement - Don't do that! const_iterator operator++(int) { const_iterator tmp = *this; operator++(); return tmp; } - /// predecrement - move to the previous interval. + /// predecrement - Move to the previous interval. const_iterator &operator--() { if (path.leafOffset() && (valid() || !branched())) --path.leafOffset(); @@ -1420,7 +1420,7 @@ public: return *this; } - /// postdecrement - Dont do that! + /// postdecrement - Don't do that! const_iterator operator--(int) { const_iterator tmp = *this; operator--(); diff --git a/llvm/include/llvm/ADT/Optional.h b/llvm/include/llvm/ADT/Optional.h index c84f9aa8b342..c64b82352397 100644 --- a/llvm/include/llvm/ADT/Optional.h +++ b/llvm/include/llvm/ADT/Optional.h @@ -269,7 +269,7 @@ public: /// Apply a function to the value if present; otherwise return None. 
template <class Function> - auto map(const Function &F) const + auto map(const Function &F) const LLVM_LVALUE_FUNCTION -> Optional<decltype(F(getValue()))> { if (*this) return F(getValue()); return None; diff --git a/llvm/include/llvm/ADT/PointerEmbeddedInt.h b/llvm/include/llvm/ADT/PointerEmbeddedInt.h index 3eb6edb03430..fbc48af79da1 100644 --- a/llvm/include/llvm/ADT/PointerEmbeddedInt.h +++ b/llvm/include/llvm/ADT/PointerEmbeddedInt.h @@ -94,7 +94,7 @@ struct PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> { return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag()); } - enum { NumLowBitsAvailable = T::Shift }; + static constexpr int NumLowBitsAvailable = T::Shift; }; // Teach DenseMap how to use PointerEmbeddedInt objects as keys if the Int type diff --git a/llvm/include/llvm/ADT/PointerIntPair.h b/llvm/include/llvm/ADT/PointerIntPair.h index fa6bf1504469..cb8b202c48b7 100644 --- a/llvm/include/llvm/ADT/PointerIntPair.h +++ b/llvm/include/llvm/ADT/PointerIntPair.h @@ -147,7 +147,7 @@ struct PointerIntPairInfo { "cannot use a pointer type that has all bits free"); static_assert(IntBits <= PtrTraits::NumLowBitsAvailable, "PointerIntPair with integer size too large for pointer"); - enum : uintptr_t { + enum MaskAndShiftConstants : uintptr_t { /// PointerBitMask - The bits that come from the pointer. 
PointerBitMask = ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1), @@ -235,7 +235,8 @@ struct PointerLikeTypeTraits< return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P); } - enum { NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits }; + static constexpr int NumLowBitsAvailable = + PtrTraits::NumLowBitsAvailable - IntBits; }; } // end namespace llvm diff --git a/llvm/include/llvm/ADT/PointerSumType.h b/llvm/include/llvm/ADT/PointerSumType.h index d467f83f58ac..a7ef774e205e 100644 --- a/llvm/include/llvm/ADT/PointerSumType.h +++ b/llvm/include/llvm/ADT/PointerSumType.h @@ -214,7 +214,7 @@ struct PointerSumTypeHelper : MemberTs... { LookupOverload(PointerSumTypeMember<N, PointerT, TraitsT> *); template <TagT N> static void LookupOverload(...); template <TagT N> struct Lookup { - // Compute a particular member type by resolving the lookup helper ovorload. + // Compute a particular member type by resolving the lookup helper overload. using MemberT = decltype( LookupOverload<N>(static_cast<PointerSumTypeHelper *>(nullptr))); diff --git a/llvm/include/llvm/ADT/PointerUnion.h b/llvm/include/llvm/ADT/PointerUnion.h index 40b7b000da40..6fecff8d756f 100644 --- a/llvm/include/llvm/ADT/PointerUnion.h +++ b/llvm/include/llvm/ADT/PointerUnion.h @@ -181,7 +181,7 @@ public: explicit operator bool() const { return !isNull(); } /// Test if the Union currently holds the type matching T. - template <typename T> int is() const { + template <typename T> bool is() const { constexpr int Index = pointer_union_detail::TypeIndex<T, PTs...>::Index; static_assert(Index < sizeof...(PTs), "PointerUnion::is<T> given type not in the union"); @@ -197,7 +197,7 @@ public: } /// Returns the current pointer if it is of the specified pointer type, - /// otherwises returns null. + /// otherwise returns null. 
template <typename T> T dyn_cast() const { if (is<T>()) return get<T>(); diff --git a/llvm/include/llvm/ADT/PostOrderIterator.h b/llvm/include/llvm/ADT/PostOrderIterator.h index 2fe7447a8e77..bb413a956d9f 100644 --- a/llvm/include/llvm/ADT/PostOrderIterator.h +++ b/llvm/include/llvm/ADT/PostOrderIterator.h @@ -18,6 +18,7 @@ #include "llvm/ADT/GraphTraits.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator_range.h" #include <iterator> #include <set> @@ -101,7 +102,7 @@ class po_iterator // VisitStack - Used to maintain the ordering. Top = current block // First element is basic block pointer, second is the 'next child' to visit - std::vector<std::pair<NodeRef, ChildItTy>> VisitStack; + SmallVector<std::pair<NodeRef, ChildItTy>, 8> VisitStack; po_iterator(NodeRef BB) { this->insertEdge(Optional<NodeRef>(), BB); diff --git a/llvm/include/llvm/ADT/PriorityWorklist.h b/llvm/include/llvm/ADT/PriorityWorklist.h index 96d22c87557e..01dd59a2e71a 100644 --- a/llvm/include/llvm/ADT/PriorityWorklist.h +++ b/llvm/include/llvm/ADT/PriorityWorklist.h @@ -110,7 +110,7 @@ public: /// Insert a sequence of new elements into the PriorityWorklist. template <typename SequenceT> - typename std::enable_if<!std::is_convertible<SequenceT, T>::value>::type + std::enable_if_t<!std::is_convertible<SequenceT, T>::value> insert(SequenceT &&Input) { if (std::begin(Input) == std::end(Input)) // Nothing to do for an empty input sequence. diff --git a/llvm/include/llvm/ADT/SCCIterator.h b/llvm/include/llvm/ADT/SCCIterator.h index 1e642b9f75d3..8a7c0a78a0fc 100644 --- a/llvm/include/llvm/ADT/SCCIterator.h +++ b/llvm/include/llvm/ADT/SCCIterator.h @@ -124,11 +124,11 @@ public: return CurrentSCC; } - /// Test if the current SCC has a loop. + /// Test if the current SCC has a cycle. /// /// If the SCC has more than one node, this is trivially true. 
If not, it may - /// still contain a loop if the node has an edge back to itself. - bool hasLoop() const; + /// still contain a cycle if the node has an edge back to itself. + bool hasCycle() const; /// This informs the \c scc_iterator that the specified \c Old node /// has been deleted, and \c New is to be used in its place. @@ -212,7 +212,7 @@ template <class GraphT, class GT> void scc_iterator<GraphT, GT>::GetNextSCC() { } template <class GraphT, class GT> -bool scc_iterator<GraphT, GT>::hasLoop() const { +bool scc_iterator<GraphT, GT>::hasCycle() const { assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!"); if (CurrentSCC.size() > 1) return true; diff --git a/llvm/include/llvm/ADT/STLExtras.h b/llvm/include/llvm/ADT/STLExtras.h index b61dab2459d1..50b688b36648 100644 --- a/llvm/include/llvm/ADT/STLExtras.h +++ b/llvm/include/llvm/ADT/STLExtras.h @@ -50,6 +50,10 @@ namespace detail { template <typename RangeT> using IterOfRange = decltype(std::begin(std::declval<RangeT &>())); +template <typename RangeT> +using ValueOfRange = typename std::remove_reference<decltype( + *std::begin(std::declval<RangeT &>()))>::type; + } // end namespace detail //===----------------------------------------------------------------------===// @@ -75,6 +79,79 @@ template <typename T> struct make_const_ref { typename std::add_const<T>::type>::type; }; +/// Utilities for detecting if a given trait holds for some set of arguments +/// 'Args'. For example, the given trait could be used to detect if a given type +/// has a copy assignment operator: +/// template<class T> +/// using has_copy_assign_t = decltype(std::declval<T&>() +/// = std::declval<const T&>()); +/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value; +namespace detail { +template <typename...> using void_t = void; +template <class, template <class...> class Op, class... Args> struct detector { + using value_t = std::false_type; +}; +template <template <class...> class Op, class... 
Args> +struct detector<void_t<Op<Args...>>, Op, Args...> { + using value_t = std::true_type; +}; +} // end namespace detail + +template <template <class...> class Op, class... Args> +using is_detected = typename detail::detector<void, Op, Args...>::value_t; + +/// Check if a Callable type can be invoked with the given set of arg types. +namespace detail { +template <typename Callable, typename... Args> +using is_invocable = + decltype(std::declval<Callable &>()(std::declval<Args>()...)); +} // namespace detail + +template <typename Callable, typename... Args> +using is_invocable = is_detected<detail::is_invocable, Callable, Args...>; + +/// This class provides various trait information about a callable object. +/// * To access the number of arguments: Traits::num_args +/// * To access the type of an argument: Traits::arg_t<Index> +/// * To access the type of the result: Traits::result_t +template <typename T, bool isClass = std::is_class<T>::value> +struct function_traits : public function_traits<decltype(&T::operator())> {}; + +/// Overload for class function types. +template <typename ClassType, typename ReturnType, typename... Args> +struct function_traits<ReturnType (ClassType::*)(Args...) const, false> { + /// The number of arguments to this function. + enum { num_args = sizeof...(Args) }; + + /// The result type of this function. + using result_t = ReturnType; + + /// The type of an argument to this function. + template <size_t Index> + using arg_t = typename std::tuple_element<Index, std::tuple<Args...>>::type; +}; +/// Overload for class function types. +template <typename ClassType, typename ReturnType, typename... Args> +struct function_traits<ReturnType (ClassType::*)(Args...), false> + : function_traits<ReturnType (ClassType::*)(Args...) const> {}; +/// Overload for non-class function types. +template <typename ReturnType, typename... Args> +struct function_traits<ReturnType (*)(Args...), false> { + /// The number of arguments to this function. 
+ enum { num_args = sizeof...(Args) }; + + /// The result type of this function. + using result_t = ReturnType; + + /// The type of an argument to this function. + template <size_t i> + using arg_t = typename std::tuple_element<i, std::tuple<Args...>>::type; +}; +/// Overload for non-class function type references. +template <typename ReturnType, typename... Args> +struct function_traits<ReturnType (&)(Args...), false> + : public function_traits<ReturnType (*)(Args...)> {}; + //===----------------------------------------------------------------------===// // Extra additions to <functional> //===----------------------------------------------------------------------===// @@ -114,10 +191,11 @@ public: function_ref(std::nullptr_t) {} template <typename Callable> - function_ref(Callable &&callable, - typename std::enable_if< - !std::is_same<typename std::remove_reference<Callable>::type, - function_ref>::value>::type * = nullptr) + function_ref( + Callable &&callable, + std::enable_if_t< + !std::is_same<std::remove_cv_t<std::remove_reference_t<Callable>>, + function_ref>::value> * = nullptr) : callback(callback_fn<typename std::remove_reference<Callable>::type>), callable(reinterpret_cast<intptr_t>(&callable)) {} @@ -125,7 +203,7 @@ public: return callback(callable, std::forward<Params>(params)...); } - operator bool() const { return callback; } + explicit operator bool() const { return callback; } }; // deleter - Very very very simple method that is used to invoke operator @@ -146,16 +224,14 @@ namespace adl_detail { using std::begin; template <typename ContainerTy> -auto adl_begin(ContainerTy &&container) - -> decltype(begin(std::forward<ContainerTy>(container))) { +decltype(auto) adl_begin(ContainerTy &&container) { return begin(std::forward<ContainerTy>(container)); } using std::end; template <typename ContainerTy> -auto adl_end(ContainerTy &&container) - -> decltype(end(std::forward<ContainerTy>(container))) { +decltype(auto) adl_end(ContainerTy &&container) { 
return end(std::forward<ContainerTy>(container)); } @@ -170,14 +246,12 @@ void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(), } // end namespace adl_detail template <typename ContainerTy> -auto adl_begin(ContainerTy &&container) - -> decltype(adl_detail::adl_begin(std::forward<ContainerTy>(container))) { +decltype(auto) adl_begin(ContainerTy &&container) { return adl_detail::adl_begin(std::forward<ContainerTy>(container)); } template <typename ContainerTy> -auto adl_end(ContainerTy &&container) - -> decltype(adl_detail::adl_end(std::forward<ContainerTy>(container))) { +decltype(auto) adl_end(ContainerTy &&container) { return adl_detail::adl_end(std::forward<ContainerTy>(container)); } @@ -193,11 +267,15 @@ constexpr bool empty(const T &RangeOrContainer) { return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer); } +/// Returns true if the given container only contains a single element. +template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) { + auto B = std::begin(C), E = std::end(C); + return B != E && std::next(B) == E; +} + /// Return a range covering \p RangeOrContainer with the first N elements /// excluded. 
-template <typename T> -auto drop_begin(T &&RangeOrContainer, size_t N) -> - iterator_range<decltype(adl_begin(RangeOrContainer))> { +template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N) { return make_range(std::next(adl_begin(RangeOrContainer), N), adl_end(RangeOrContainer)); } @@ -219,7 +297,7 @@ public: ItTy getCurrent() { return this->I; } - FuncReturnTy operator*() { return F(*this->I); } + FuncReturnTy operator*() const { return F(*this->I); } private: FuncTy F; @@ -233,9 +311,7 @@ inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) { } template <class ContainerTy, class FuncTy> -auto map_range(ContainerTy &&C, FuncTy F) - -> decltype(make_range(map_iterator(C.begin(), F), - map_iterator(C.end(), F))) { +auto map_range(ContainerTy &&C, FuncTy F) { return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F)); } @@ -263,8 +339,7 @@ struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> { // Note that the container must have rbegin()/rend() methods for this to work. template <typename ContainerTy> auto reverse(ContainerTy &&C, - typename std::enable_if<has_rbegin<ContainerTy>::value>::type * = - nullptr) -> decltype(make_range(C.rbegin(), C.rend())) { + std::enable_if_t<has_rbegin<ContainerTy>::value> * = nullptr) { return make_range(C.rbegin(), C.rend()); } @@ -278,11 +353,8 @@ std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) { // Note that the container must have begin()/end() methods which return // bidirectional iterators for this to work. 
template <typename ContainerTy> -auto reverse( - ContainerTy &&C, - typename std::enable_if<!has_rbegin<ContainerTy>::value>::type * = nullptr) - -> decltype(make_range(llvm::make_reverse_iterator(std::end(C)), - llvm::make_reverse_iterator(std::begin(C)))) { +auto reverse(ContainerTy &&C, + std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) { return make_range(llvm::make_reverse_iterator(std::end(C)), llvm::make_reverse_iterator(std::begin(C))); } @@ -673,16 +745,15 @@ detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u, namespace detail { template <typename Iter> -static Iter next_or_end(const Iter &I, const Iter &End) { +Iter next_or_end(const Iter &I, const Iter &End) { if (I == End) return End; return std::next(I); } template <typename Iter> -static auto deref_or_none(const Iter &I, const Iter &End) - -> llvm::Optional<typename std::remove_const< - typename std::remove_reference<decltype(*I)>::type>::type> { +auto deref_or_none(const Iter &I, const Iter &End) -> llvm::Optional< + std::remove_const_t<std::remove_reference_t<decltype(*I)>>> { if (I == End) return None; return *I; @@ -887,7 +958,7 @@ class concat_iterator } public: - /// Constructs an iterator from a squence of ranges. + /// Constructs an iterator from a sequence of ranges. /// /// We need the full range to know how to switch between each of the /// iterators. @@ -956,6 +1027,234 @@ detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) { std::forward<RangeTs>(Ranges)...); } +/// A utility class used to implement an iterator that contains some base object +/// and an index. The iterator moves the index but keeps the base constant. 
+template <typename DerivedT, typename BaseT, typename T, + typename PointerT = T *, typename ReferenceT = T &> +class indexed_accessor_iterator + : public llvm::iterator_facade_base<DerivedT, + std::random_access_iterator_tag, T, + std::ptrdiff_t, PointerT, ReferenceT> { +public: + ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const { + assert(base == rhs.base && "incompatible iterators"); + return index - rhs.index; + } + bool operator==(const indexed_accessor_iterator &rhs) const { + return base == rhs.base && index == rhs.index; + } + bool operator<(const indexed_accessor_iterator &rhs) const { + assert(base == rhs.base && "incompatible iterators"); + return index < rhs.index; + } + + DerivedT &operator+=(ptrdiff_t offset) { + this->index += offset; + return static_cast<DerivedT &>(*this); + } + DerivedT &operator-=(ptrdiff_t offset) { + this->index -= offset; + return static_cast<DerivedT &>(*this); + } + + /// Returns the current index of the iterator. + ptrdiff_t getIndex() const { return index; } + + /// Returns the current base of the iterator. + const BaseT &getBase() const { return base; } + +protected: + indexed_accessor_iterator(BaseT base, ptrdiff_t index) + : base(base), index(index) {} + BaseT base; + ptrdiff_t index; +}; + +namespace detail { +/// The class represents the base of a range of indexed_accessor_iterators. It +/// provides support for many different range functionalities, e.g. +/// drop_front/slice/etc.. Derived range classes must implement the following +/// static methods: +/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index) +/// - Dereference an iterator pointing to the base object at the given +/// index. +/// * BaseT offset_base(const BaseT &base, ptrdiff_t index) +/// - Return a new base that is offset from the provide base by 'index' +/// elements. 
+template <typename DerivedT, typename BaseT, typename T, + typename PointerT = T *, typename ReferenceT = T &> +class indexed_accessor_range_base { +public: + using RangeBaseT = + indexed_accessor_range_base<DerivedT, BaseT, T, PointerT, ReferenceT>; + + /// An iterator element of this range. + class iterator : public indexed_accessor_iterator<iterator, BaseT, T, + PointerT, ReferenceT> { + public: + // Index into this iterator, invoking a static method on the derived type. + ReferenceT operator*() const { + return DerivedT::dereference_iterator(this->getBase(), this->getIndex()); + } + + private: + iterator(BaseT owner, ptrdiff_t curIndex) + : indexed_accessor_iterator<iterator, BaseT, T, PointerT, ReferenceT>( + owner, curIndex) {} + + /// Allow access to the constructor. + friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT, + ReferenceT>; + }; + + indexed_accessor_range_base(iterator begin, iterator end) + : base(offset_base(begin.getBase(), begin.getIndex())), + count(end.getIndex() - begin.getIndex()) {} + indexed_accessor_range_base(const iterator_range<iterator> &range) + : indexed_accessor_range_base(range.begin(), range.end()) {} + indexed_accessor_range_base(BaseT base, ptrdiff_t count) + : base(base), count(count) {} + + iterator begin() const { return iterator(base, 0); } + iterator end() const { return iterator(base, count); } + ReferenceT operator[](unsigned index) const { + assert(index < size() && "invalid index for value range"); + return DerivedT::dereference_iterator(base, index); + } + ReferenceT front() const { + assert(!empty() && "expected non-empty range"); + return (*this)[0]; + } + ReferenceT back() const { + assert(!empty() && "expected non-empty range"); + return (*this)[size() - 1]; + } + + /// Compare this range with another. 
+ template <typename OtherT> bool operator==(const OtherT &other) const { + return size() == + static_cast<size_t>(std::distance(other.begin(), other.end())) && + std::equal(begin(), end(), other.begin()); + } + template <typename OtherT> bool operator!=(const OtherT &other) const { + return !(*this == other); + } + + /// Return the size of this range. + size_t size() const { return count; } + + /// Return if the range is empty. + bool empty() const { return size() == 0; } + + /// Drop the first N elements, and keep M elements. + DerivedT slice(size_t n, size_t m) const { + assert(n + m <= size() && "invalid size specifiers"); + return DerivedT(offset_base(base, n), m); + } + + /// Drop the first n elements. + DerivedT drop_front(size_t n = 1) const { + assert(size() >= n && "Dropping more elements than exist"); + return slice(n, size() - n); + } + /// Drop the last n elements. + DerivedT drop_back(size_t n = 1) const { + assert(size() >= n && "Dropping more elements than exist"); + return DerivedT(base, size() - n); + } + + /// Take the first n elements. + DerivedT take_front(size_t n = 1) const { + return n < size() ? drop_back(size() - n) + : static_cast<const DerivedT &>(*this); + } + + /// Take the last n elements. + DerivedT take_back(size_t n = 1) const { + return n < size() ? drop_front(size() - n) + : static_cast<const DerivedT &>(*this); + } + + /// Allow conversion to any type accepting an iterator_range. + template <typename RangeT, typename = std::enable_if_t<std::is_constructible< + RangeT, iterator_range<iterator>>::value>> + operator RangeT() const { + return RangeT(iterator_range<iterator>(*this)); + } + + /// Returns the base of this range. + const BaseT &getBase() const { return base; } + +private: + /// Offset the given base by the given amount. + static BaseT offset_base(const BaseT &base, size_t n) { + return n == 0 ? 
base : DerivedT::offset_base(base, n); + } + +protected: + indexed_accessor_range_base(const indexed_accessor_range_base &) = default; + indexed_accessor_range_base(indexed_accessor_range_base &&) = default; + indexed_accessor_range_base & + operator=(const indexed_accessor_range_base &) = default; + + /// The base that owns the provided range of values. + BaseT base; + /// The size from the owning range. + ptrdiff_t count; +}; +} // end namespace detail + +/// This class provides an implementation of a range of +/// indexed_accessor_iterators where the base is not indexable. Ranges with +/// bases that are offsetable should derive from indexed_accessor_range_base +/// instead. Derived range classes are expected to implement the following +/// static method: +/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index) +/// - Dereference an iterator pointing to a parent base at the given index. +template <typename DerivedT, typename BaseT, typename T, + typename PointerT = T *, typename ReferenceT = T &> +class indexed_accessor_range + : public detail::indexed_accessor_range_base< + DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> { +public: + indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count) + : detail::indexed_accessor_range_base< + DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>( + std::make_pair(base, startIndex), count) {} + using detail::indexed_accessor_range_base< + DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, + ReferenceT>::indexed_accessor_range_base; + + /// Returns the current base of the range. + const BaseT &getBase() const { return this->base.first; } + + /// Returns the current start index of the range. + ptrdiff_t getStartIndex() const { return this->base.second; } + + /// See `detail::indexed_accessor_range_base` for details. 
+ static std::pair<BaseT, ptrdiff_t> + offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) { + // We encode the internal base as a pair of the derived base and a start + // index into the derived base. + return std::make_pair(base.first, base.second + index); + } + /// See `detail::indexed_accessor_range_base` for details. + static ReferenceT + dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base, + ptrdiff_t index) { + return DerivedT::dereference(base.first, base.second + index); + } +}; + +/// Given a container of pairs, return a range over the second elements. +template <typename ContainerTy> auto make_second_range(ContainerTy &&c) { + return llvm::map_range( + std::forward<ContainerTy>(c), + [](decltype((*std::begin(c))) elt) -> decltype((elt.second)) { + return elt.second; + }); +} + //===----------------------------------------------------------------------===// // Extra additions to <utility> //===----------------------------------------------------------------------===// @@ -983,8 +1282,7 @@ struct on_first { FuncTy func; template <typename T> - auto operator()(const T &lhs, const T &rhs) const - -> decltype(func(lhs.first, rhs.first)) { + decltype(auto) operator()(const T &lhs, const T &rhs) const { return func(lhs.first, rhs.first); } }; @@ -1022,6 +1320,16 @@ struct are_base_of<T, U, Ts...> { // Extra additions for arrays //===----------------------------------------------------------------------===// +// We have a copy here so that LLVM behaves the same when using different +// standard libraries. +template <class Iterator, class RNG> +void shuffle(Iterator first, Iterator last, RNG &&g) { + // It would be better to use a std::uniform_int_distribution, + // but that would be stdlib dependent. + for (auto size = last - first; size > 1; ++first, (void)--size) + std::iter_swap(first, first + g() % size); +} + /// Find the length of an array. 
template <class T, std::size_t N> constexpr inline size_t array_lengthof(T (&)[N]) { @@ -1108,9 +1416,20 @@ inline void array_pod_sort( reinterpret_cast<int (*)(const void *, const void *)>(Compare)); } +namespace detail { +template <typename T> +// We can use qsort if the iterator type is a pointer and the underlying value +// is trivially copyable. +using sort_trivially_copyable = conjunction< + std::is_pointer<T>, + is_trivially_copyable<typename std::iterator_traits<T>::value_type>>; +} // namespace detail + // Provide wrappers to std::sort which shuffle the elements before sorting // to help uncover non-deterministic behavior (PR35135). -template <typename IteratorTy> +template <typename IteratorTy, + std::enable_if_t<!detail::sort_trivially_copyable<IteratorTy>::value, + int> = 0> inline void sort(IteratorTy Start, IteratorTy End) { #ifdef EXPENSIVE_CHECKS detail::presortShuffle<IteratorTy>(Start, End); @@ -1118,6 +1437,15 @@ inline void sort(IteratorTy Start, IteratorTy End) { std::sort(Start, End); } +// Forward trivially copyable types to array_pod_sort. This avoids a large +// amount of code bloat for a minor performance hit. +template <typename IteratorTy, + std::enable_if_t<detail::sort_trivially_copyable<IteratorTy>::value, + int> = 0> +inline void sort(IteratorTy Start, IteratorTy End) { + array_pod_sort(Start, End); +} + template <typename Container> inline void sort(Container &&C) { llvm::sort(adl_begin(C), adl_end(C)); } @@ -1139,33 +1467,14 @@ inline void sort(Container &&C, Compare Comp) { // Extra additions to <algorithm> //===----------------------------------------------------------------------===// -/// For a container of pointers, deletes the pointers and then clears the -/// container. 
-template<typename Container> -void DeleteContainerPointers(Container &C) { - for (auto V : C) - delete V; - C.clear(); -} - -/// In a container of pairs (usually a map) whose second element is a pointer, -/// deletes the second elements and then clears the container. -template<typename Container> -void DeleteContainerSeconds(Container &C) { - for (auto &V : C) - delete V.second; - C.clear(); -} - /// Get the size of a range. This is a wrapper function around std::distance /// which is only enabled when the operation is O(1). template <typename R> -auto size(R &&Range, typename std::enable_if< - std::is_same<typename std::iterator_traits<decltype( - Range.begin())>::iterator_category, - std::random_access_iterator_tag>::value, - void>::type * = nullptr) - -> decltype(std::distance(Range.begin(), Range.end())) { +auto size(R &&Range, + std::enable_if_t<std::is_same<typename std::iterator_traits<decltype( + Range.begin())>::iterator_category, + std::random_access_iterator_tag>::value, + void> * = nullptr) { return std::distance(Range.begin(), Range.end()); } @@ -1199,27 +1508,26 @@ bool none_of(R &&Range, UnaryPredicate P) { /// Provide wrappers to std::find which take ranges instead of having to pass /// begin/end explicitly. -template <typename R, typename T> -auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range)) { +template <typename R, typename T> auto find(R &&Range, const T &Val) { return std::find(adl_begin(Range), adl_end(Range), Val); } /// Provide wrappers to std::find_if which take ranges instead of having to pass /// begin/end explicitly. 
template <typename R, typename UnaryPredicate> -auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) { +auto find_if(R &&Range, UnaryPredicate P) { return std::find_if(adl_begin(Range), adl_end(Range), P); } template <typename R, typename UnaryPredicate> -auto find_if_not(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) { +auto find_if_not(R &&Range, UnaryPredicate P) { return std::find_if_not(adl_begin(Range), adl_end(Range), P); } /// Provide wrappers to std::remove_if which take ranges instead of having to /// pass begin/end explicitly. template <typename R, typename UnaryPredicate> -auto remove_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) { +auto remove_if(R &&Range, UnaryPredicate P) { return std::remove_if(adl_begin(Range), adl_end(Range), P); } @@ -1242,19 +1550,28 @@ bool is_contained(R &&Range, const E &Element) { return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range); } +/// Wrapper function around std::is_sorted to check if elements in a range \p R +/// are sorted with respect to a comparator \p C. +template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) { + return std::is_sorted(adl_begin(Range), adl_end(Range), C); +} + +/// Wrapper function around std::is_sorted to check if elements in a range \p R +/// are sorted in non-descending order. +template <typename R> bool is_sorted(R &&Range) { + return std::is_sorted(adl_begin(Range), adl_end(Range)); +} + /// Wrapper function around std::count to count the number of times an element /// \p Element occurs in the given range \p Range. 
-template <typename R, typename E> -auto count(R &&Range, const E &Element) -> - typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type { +template <typename R, typename E> auto count(R &&Range, const E &Element) { return std::count(adl_begin(Range), adl_end(Range), Element); } /// Wrapper function around std::count_if to count the number of times an /// element satisfying a given predicate occurs in a range. template <typename R, typename UnaryPredicate> -auto count_if(R &&Range, UnaryPredicate P) -> - typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type { +auto count_if(R &&Range, UnaryPredicate P) { return std::count_if(adl_begin(Range), adl_end(Range), P); } @@ -1268,36 +1585,32 @@ OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate P) { /// Provide wrappers to std::partition which take ranges instead of having to /// pass begin/end explicitly. template <typename R, typename UnaryPredicate> -auto partition(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) { +auto partition(R &&Range, UnaryPredicate P) { return std::partition(adl_begin(Range), adl_end(Range), P); } /// Provide wrappers to std::lower_bound which take ranges instead of having to /// pass begin/end explicitly. -template <typename R, typename T> -auto lower_bound(R &&Range, T &&Value) -> decltype(adl_begin(Range)) { +template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) { return std::lower_bound(adl_begin(Range), adl_end(Range), std::forward<T>(Value)); } template <typename R, typename T, typename Compare> -auto lower_bound(R &&Range, T &&Value, Compare C) - -> decltype(adl_begin(Range)) { +auto lower_bound(R &&Range, T &&Value, Compare C) { return std::lower_bound(adl_begin(Range), adl_end(Range), std::forward<T>(Value), C); } /// Provide wrappers to std::upper_bound which take ranges instead of having to /// pass begin/end explicitly. 
-template <typename R, typename T> -auto upper_bound(R &&Range, T &&Value) -> decltype(adl_begin(Range)) { +template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) { return std::upper_bound(adl_begin(Range), adl_end(Range), std::forward<T>(Value)); } template <typename R, typename T, typename Compare> -auto upper_bound(R &&Range, T &&Value, Compare C) - -> decltype(adl_begin(Range)) { +auto upper_bound(R &&Range, T &&Value, Compare C) { return std::upper_bound(adl_begin(Range), adl_end(Range), std::forward<T>(Value), C); } @@ -1316,7 +1629,7 @@ void stable_sort(R &&Range, Compare C) { /// Requires that C is always true below some limit, and always false above it. template <typename R, typename Predicate, typename Val = decltype(*adl_begin(std::declval<R>()))> -auto partition_point(R &&Range, Predicate P) -> decltype(adl_begin(Range)) { +auto partition_point(R &&Range, Predicate P) { return std::partition_point(adl_begin(Range), adl_end(Range), P); } @@ -1368,6 +1681,69 @@ void replace(Container &Cont, typename Container::iterator ContIt, replace(Cont, ContIt, ContEnd, R.begin(), R.end()); } +/// An STL-style algorithm similar to std::for_each that applies a second +/// functor between every pair of elements. 
+/// +/// This provides the control flow logic to, for example, print a +/// comma-separated list: +/// \code +/// interleave(names.begin(), names.end(), +/// [&](StringRef name) { os << name; }, +/// [&] { os << ", "; }); +/// \endcode +template <typename ForwardIterator, typename UnaryFunctor, + typename NullaryFunctor, + typename = typename std::enable_if< + !std::is_constructible<StringRef, UnaryFunctor>::value && + !std::is_constructible<StringRef, NullaryFunctor>::value>::type> +inline void interleave(ForwardIterator begin, ForwardIterator end, + UnaryFunctor each_fn, NullaryFunctor between_fn) { + if (begin == end) + return; + each_fn(*begin); + ++begin; + for (; begin != end; ++begin) { + between_fn(); + each_fn(*begin); + } +} + +template <typename Container, typename UnaryFunctor, typename NullaryFunctor, + typename = typename std::enable_if< + !std::is_constructible<StringRef, UnaryFunctor>::value && + !std::is_constructible<StringRef, NullaryFunctor>::value>::type> +inline void interleave(const Container &c, UnaryFunctor each_fn, + NullaryFunctor between_fn) { + interleave(c.begin(), c.end(), each_fn, between_fn); +} + +/// Overload of interleave for the common case of string separator. 
+template <typename Container, typename UnaryFunctor, typename StreamT, + typename T = detail::ValueOfRange<Container>> +inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn, + const StringRef &separator) { + interleave(c.begin(), c.end(), each_fn, [&] { os << separator; }); +} +template <typename Container, typename StreamT, + typename T = detail::ValueOfRange<Container>> +inline void interleave(const Container &c, StreamT &os, + const StringRef &separator) { + interleave( + c, os, [&](const T &a) { os << a; }, separator); +} + +template <typename Container, typename UnaryFunctor, typename StreamT, + typename T = detail::ValueOfRange<Container>> +inline void interleaveComma(const Container &c, StreamT &os, + UnaryFunctor each_fn) { + interleave(c, os, each_fn, ", "); +} +template <typename Container, typename StreamT, + typename T = detail::ValueOfRange<Container>> +inline void interleaveComma(const Container &c, StreamT &os) { + interleaveComma(c, os, [&](const T &a) { os << a; }); +} + //===----------------------------------------------------------------------===// // Extra additions to <memory> //===----------------------------------------------------------------------===// @@ -1393,8 +1769,7 @@ template <typename T> struct deref { // Could be further improved to cope with non-derivable functors and // non-binary functors (should be a variadic template member function // operator()). - template <typename A, typename B> - auto operator()(A &lhs, B &rhs) const -> decltype(func(*lhs, *rhs)) { + template <typename A, typename B> auto operator()(A &lhs, B &rhs) const { assert(lhs); assert(rhs); return func(*lhs, *rhs); @@ -1515,8 +1890,7 @@ template <typename R> detail::enumerator<R> enumerate(R &&TheRange) { namespace detail { template <typename F, typename Tuple, std::size_t... 
I> -auto apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) - -> decltype(std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...)) { +decltype(auto) apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) { return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...); } @@ -1526,10 +1900,7 @@ auto apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) /// tuple variadically to f as if by calling f(a1, a2, ..., an) and /// return the result. template <typename F, typename Tuple> -auto apply_tuple(F &&f, Tuple &&t) -> decltype(detail::apply_tuple_impl( - std::forward<F>(f), std::forward<Tuple>(t), - std::make_index_sequence< - std::tuple_size<typename std::decay<Tuple>::type>::value>{})) { +decltype(auto) apply_tuple(F &&f, Tuple &&t) { using Indices = std::make_index_sequence< std::tuple_size<typename std::decay<Tuple>::type>::value>; @@ -1539,49 +1910,89 @@ auto apply_tuple(F &&f, Tuple &&t) -> decltype(detail::apply_tuple_impl( /// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N) /// time. Not meant for use with random-access iterators. -template <typename IterTy> +/// Can optionally take a predicate to filter lazily some items. +template<typename IterTy, + typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)> bool hasNItems( IterTy &&Begin, IterTy &&End, unsigned N, - typename std::enable_if< - !std::is_same< - typename std::iterator_traits<typename std::remove_reference< - decltype(Begin)>::type>::iterator_category, - std::random_access_iterator_tag>::value, - void>::type * = nullptr) { - for (; N; --N, ++Begin) + Pred &&ShouldBeCounted = + [](const decltype(*std::declval<IterTy>()) &) { return true; }, + std::enable_if_t< + !std::is_same<typename std::iterator_traits<std::remove_reference_t< + decltype(Begin)>>::iterator_category, + std::random_access_iterator_tag>::value, + void> * = nullptr) { + for (; N; ++Begin) { if (Begin == End) return false; // Too few. 
- return Begin == End; + N -= ShouldBeCounted(*Begin); + } + for (; Begin != End; ++Begin) + if (ShouldBeCounted(*Begin)) + return false; // Too many. + return true; } /// Return true if the sequence [Begin, End) has N or more items. Runs in O(N) /// time. Not meant for use with random-access iterators. -template <typename IterTy> +/// Can optionally take a predicate to lazily filter some items. +template<typename IterTy, + typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)> bool hasNItemsOrMore( IterTy &&Begin, IterTy &&End, unsigned N, - typename std::enable_if< - !std::is_same< - typename std::iterator_traits<typename std::remove_reference< - decltype(Begin)>::type>::iterator_category, - std::random_access_iterator_tag>::value, - void>::type * = nullptr) { - for (; N; --N, ++Begin) + Pred &&ShouldBeCounted = + [](const decltype(*std::declval<IterTy>()) &) { return true; }, + std::enable_if_t< + !std::is_same<typename std::iterator_traits<std::remove_reference_t< + decltype(Begin)>>::iterator_category, + std::random_access_iterator_tag>::value, + void> * = nullptr) { + for (; N; ++Begin) { if (Begin == End) return false; // Too few. + N -= ShouldBeCounted(*Begin); + } return true; } +/// Returns true if the sequence [Begin, End) has N or less items. Can +/// optionally take a predicate to lazily filter some items. 
+template <typename IterTy, + typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)> +bool hasNItemsOrLess( + IterTy &&Begin, IterTy &&End, unsigned N, + Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) { + return true; + }) { + assert(N != std::numeric_limits<unsigned>::max()); + return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted); +} + +/// Returns true if the given container has exactly N items +template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) { + return hasNItems(std::begin(C), std::end(C), N); +} + +/// Returns true if the given container has N or more items +template <typename ContainerTy> +bool hasNItemsOrMore(ContainerTy &&C, unsigned N) { + return hasNItemsOrMore(std::begin(C), std::end(C), N); +} + +/// Returns true if the given container has N or less items +template <typename ContainerTy> +bool hasNItemsOrLess(ContainerTy &&C, unsigned N) { + return hasNItemsOrLess(std::begin(C), std::end(C), N); +} + /// Returns a raw pointer that represents the same address as the argument. /// -/// The late bound return should be removed once we move to C++14 to better -/// align with the C++20 declaration. Also, this implementation can be removed -/// once we move to C++20 where it's defined as std::to_addres() +/// This implementation can be removed once we move to C++20 where it's defined +/// as std::to_address(). /// /// The std::pointer_traits<>::to_address(p) variations of these overloads has /// not been implemented. 
-template <class Ptr> auto to_address(const Ptr &P) -> decltype(P.operator->()) { - return P.operator->(); -} +template <class Ptr> auto to_address(const Ptr &P) { return P.operator->(); } template <class T> constexpr T *to_address(T *P) { return P; } } // end namespace llvm diff --git a/llvm/include/llvm/ADT/ScopedHashTable.h b/llvm/include/llvm/ADT/ScopedHashTable.h index 40c49ebc0be1..a5e57c6a16c2 100644 --- a/llvm/include/llvm/ADT/ScopedHashTable.h +++ b/llvm/include/llvm/ADT/ScopedHashTable.h @@ -32,7 +32,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseMapInfo.h" -#include "llvm/Support/Allocator.h" +#include "llvm/Support/AllocatorBase.h" #include <cassert> #include <new> diff --git a/llvm/include/llvm/ADT/SetOperations.h b/llvm/include/llvm/ADT/SetOperations.h index 037256a860b2..6087f479fe37 100644 --- a/llvm/include/llvm/ADT/SetOperations.h +++ b/llvm/include/llvm/ADT/SetOperations.h @@ -65,6 +65,27 @@ void set_subtract(S1Ty &S1, const S2Ty &S2) { S1.erase(*SI); } +/// set_is_subset(A, B) - Return true iff A in B +/// +template <class S1Ty, class S2Ty> +bool set_is_subset(const S1Ty &S1, const S2Ty &S2) { + if (S1.size() > S2.size()) + return false; + for (auto &It : S1) + if (!S2.count(It)) + return false; + return true; +} + +/// set_is_strict_subset(A, B) - Return true iff A in B and and A != B +/// +template <class S1Ty, class S2Ty> +bool set_is_strict_subset(const S1Ty &S1, const S2Ty &S2) { + if (S1.size() >= S2.size()) + return false; + return set_is_subset(S1, S2); +} + } // End llvm namespace #endif diff --git a/llvm/include/llvm/ADT/SetVector.h b/llvm/include/llvm/ADT/SetVector.h index d0a0d28d1c81..91ad72143ed3 100644 --- a/llvm/include/llvm/ADT/SetVector.h +++ b/llvm/include/llvm/ADT/SetVector.h @@ -174,7 +174,7 @@ public: set_.erase(V); // FIXME: No need to use the non-const iterator when built with - // std:vector.erase(const_iterator) as defined in C++11. This is for + // std::vector.erase(const_iterator) as defined in C++11. 
This is for // compatibility with non-standard libstdc++ up to 4.8 (fixed in 4.9). auto NI = vector_.begin(); std::advance(NI, std::distance<iterator>(NI, I)); @@ -263,6 +263,11 @@ public: remove(*SI); } + void swap(SetVector<T, Vector, Set> &RHS) { + set_.swap(RHS.set_); + vector_.swap(RHS.vector_); + } + private: /// A wrapper predicate designed for use with std::remove_if. /// @@ -308,4 +313,22 @@ public: } // end namespace llvm +namespace std { + +/// Implement std::swap in terms of SetVector swap. +template<typename T, typename V, typename S> +inline void +swap(llvm::SetVector<T, V, S> &LHS, llvm::SetVector<T, V, S> &RHS) { + LHS.swap(RHS); +} + +/// Implement std::swap in terms of SmallSetVector swap. +template<typename T, unsigned N> +inline void +swap(llvm::SmallSetVector<T, N> &LHS, llvm::SmallSetVector<T, N> &RHS) { + LHS.swap(RHS); +} + +} // end namespace std + #endif // LLVM_ADT_SETVECTOR_H diff --git a/llvm/include/llvm/ADT/SmallBitVector.h b/llvm/include/llvm/ADT/SmallBitVector.h index 61375c008022..f570bac23ad5 100644 --- a/llvm/include/llvm/ADT/SmallBitVector.h +++ b/llvm/include/llvm/ADT/SmallBitVector.h @@ -287,11 +287,11 @@ public: /// Returns -1 if the next unset bit is not found. int find_next_unset(unsigned Prev) const { if (isSmall()) { - ++Prev; uintptr_t Bits = getSmallBits(); // Mask in previous bits. - uintptr_t Mask = (uintptr_t(1) << Prev) - 1; - Bits |= Mask; + Bits |= (uintptr_t(1) << (Prev + 1)) - 1; + // Mask in unused bits. 
+ Bits |= ~uintptr_t(0) << getSmallSize(); if (Bits == ~uintptr_t(0) || Prev + 1 >= getSmallSize()) return -1; @@ -662,6 +662,19 @@ public: getPointer()->clearBitsNotInMask(Mask, MaskWords); } + void invalid() { + assert(empty()); + X = (uintptr_t)-1; + } + bool isInvalid() const { return X == (uintptr_t)-1; } + + ArrayRef<uintptr_t> getData(uintptr_t &Store) const { + if (!isSmall()) + return getPointer()->getData(); + Store = getSmallBits(); + return makeArrayRef(Store); + } + private: template <bool AddBits, bool InvertMask> void applyMask(const uint32_t *Mask, unsigned MaskWords) { @@ -699,6 +712,24 @@ operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) { return Result; } +template <> struct DenseMapInfo<SmallBitVector> { + static inline SmallBitVector getEmptyKey() { return SmallBitVector(); } + static inline SmallBitVector getTombstoneKey() { + SmallBitVector V; + V.invalid(); + return V; + } + static unsigned getHashValue(const SmallBitVector &V) { + uintptr_t Store; + return DenseMapInfo<std::pair<unsigned, ArrayRef<uintptr_t>>>::getHashValue( + std::make_pair(V.size(), V.getData(Store))); + } + static bool isEqual(const SmallBitVector &LHS, const SmallBitVector &RHS) { + if (LHS.isInvalid() || RHS.isInvalid()) + return LHS.isInvalid() == RHS.isInvalid(); + return LHS == RHS; + } +}; } // end namespace llvm namespace std { diff --git a/llvm/include/llvm/ADT/SmallPtrSet.h b/llvm/include/llvm/ADT/SmallPtrSet.h index 1d8280063c80..0ab05cfe611a 100644 --- a/llvm/include/llvm/ADT/SmallPtrSet.h +++ b/llvm/include/llvm/ADT/SmallPtrSet.h @@ -278,7 +278,7 @@ public: const DebugEpochBase &Epoch) : SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {} - // Most methods provided by baseclass. + // Most methods are provided by the base class. 
const PtrTy operator*() const { assert(isHandleInSync() && "invalid iterator access!"); @@ -346,14 +346,8 @@ class SmallPtrSetImpl : public SmallPtrSetImplBase { using ConstPtrTraits = PointerLikeTypeTraits<ConstPtrType>; protected: - // Constructors that forward to the base. - SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl &that) - : SmallPtrSetImplBase(SmallStorage, that) {} - SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize, - SmallPtrSetImpl &&that) - : SmallPtrSetImplBase(SmallStorage, SmallSize, std::move(that)) {} - explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize) - : SmallPtrSetImplBase(SmallStorage, SmallSize) {} + // Forward constructors to the base. + using SmallPtrSetImplBase::SmallPtrSetImplBase; public: using iterator = SmallPtrSetIterator<PtrType>; @@ -378,7 +372,9 @@ public: return erase_imp(PtrTraits::getAsVoidPointer(Ptr)); } /// count - Return 1 if the specified pointer is in the set, 0 otherwise. - size_type count(ConstPtrType Ptr) const { return find(Ptr) != end() ? 1 : 0; } + size_type count(ConstPtrType Ptr) const { + return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer(); + } iterator find(ConstPtrType Ptr) const { return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr))); } diff --git a/llvm/include/llvm/ADT/SmallString.h b/llvm/include/llvm/ADT/SmallString.h index 898be80d0324..cd6f2173d04f 100644 --- a/llvm/include/llvm/ADT/SmallString.h +++ b/llvm/include/llvm/ADT/SmallString.h @@ -263,7 +263,7 @@ public: // Extra methods. /// Explicit conversion to StringRef. - StringRef str() const { return StringRef(this->begin(), this->size()); } + StringRef str() const { return StringRef(this->data(), this->size()); } // TODO: Make this const, if it's safe... const char* c_str() { @@ -275,6 +275,10 @@ public: /// Implicit conversion to StringRef. 
operator StringRef() const { return str(); } + explicit operator std::string() const { + return std::string(this->data(), this->size()); + } + // Extra operators. const SmallString &operator=(StringRef RHS) { this->clear(); diff --git a/llvm/include/llvm/ADT/SmallVector.h b/llvm/include/llvm/ADT/SmallVector.h index 8c46aa906905..3ccee3d21d48 100644 --- a/llvm/include/llvm/ADT/SmallVector.h +++ b/llvm/include/llvm/ADT/SmallVector.h @@ -16,10 +16,10 @@ #include "llvm/ADT/iterator_range.h" #include "llvm/Support/AlignOf.h" #include "llvm/Support/Compiler.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/MemAlloc.h" #include "llvm/Support/type_traits.h" -#include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> @@ -27,6 +27,7 @@ #include <cstring> #include <initializer_list> #include <iterator> +#include <limits> #include <memory> #include <new> #include <type_traits> @@ -34,11 +35,23 @@ namespace llvm { -/// This is all the non-templated stuff common to all SmallVectors. -class SmallVectorBase { +/// This is all the stuff common to all SmallVectors. +/// +/// The template parameter specifies the type which should be used to hold the +/// Size and Capacity of the SmallVector, so it can be adjusted. +/// Using 32 bit size is desirable to shrink the size of the SmallVector. +/// Using 64 bit size is desirable for cases like SmallVector<char>, where a +/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for +/// buffering bitcode output - which can exceed 4GB. +template <class Size_T> class SmallVectorBase { protected: void *BeginX; - unsigned Size = 0, Capacity; + Size_T Size = 0, Capacity; + + /// The maximum value of the Size_T used. 
+ static constexpr size_t SizeTypeMax() { + return std::numeric_limits<Size_T>::max(); + } SmallVectorBase() = delete; SmallVectorBase(void *FirstEl, size_t TotalCapacity) @@ -46,6 +59,7 @@ protected: /// This is an implementation of the grow() method which only works /// on POD-like data types and is out of line to reduce code duplication. + /// This function will report a fatal error if it cannot increase capacity. void grow_pod(void *FirstEl, size_t MinCapacity, size_t TSize); public: @@ -69,9 +83,14 @@ public: } }; +template <class T> +using SmallVectorSizeType = + typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t, + uint32_t>::type; + /// Figure out the offset of the first element. template <class T, typename = void> struct SmallVectorAlignmentAndSize { - AlignedCharArrayUnion<SmallVectorBase> Base; + AlignedCharArrayUnion<SmallVectorBase<SmallVectorSizeType<T>>> Base; AlignedCharArrayUnion<T> FirstEl; }; @@ -79,7 +98,10 @@ template <class T, typename = void> struct SmallVectorAlignmentAndSize { /// the type T is a POD. The extra dummy template argument is used by ArrayRef /// to avoid unnecessarily requiring T to be complete. template <typename T, typename = void> -class SmallVectorTemplateCommon : public SmallVectorBase { +class SmallVectorTemplateCommon + : public SmallVectorBase<SmallVectorSizeType<T>> { + using Base = SmallVectorBase<SmallVectorSizeType<T>>; + /// Find the address of the first element. For this pointer math to be valid /// with small-size of 0 for T with lots of alignment, it's important that /// SmallVectorStorage is properly-aligned even for small-size of 0. @@ -91,21 +113,20 @@ class SmallVectorTemplateCommon : public SmallVectorBase { // Space after 'FirstEl' is clobbered, do not add any instance vars after it. 
protected: - SmallVectorTemplateCommon(size_t Size) - : SmallVectorBase(getFirstEl(), Size) {} + SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {} void grow_pod(size_t MinCapacity, size_t TSize) { - SmallVectorBase::grow_pod(getFirstEl(), MinCapacity, TSize); + Base::grow_pod(getFirstEl(), MinCapacity, TSize); } /// Return true if this is a smallvector which has not had dynamic /// memory allocated for it. - bool isSmall() const { return BeginX == getFirstEl(); } + bool isSmall() const { return this->BeginX == getFirstEl(); } /// Put this vector in a state of being small. void resetToSmall() { - BeginX = getFirstEl(); - Size = Capacity = 0; // FIXME: Setting Capacity to 0 is suspect. + this->BeginX = getFirstEl(); + this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect. } public: @@ -123,6 +144,10 @@ public: using pointer = T *; using const_pointer = const T *; + using Base::capacity; + using Base::empty; + using Base::size; + // forward iterator creation methods. iterator begin() { return (iterator)this->BeginX; } const_iterator begin() const { return (const_iterator)this->BeginX; } @@ -136,7 +161,9 @@ public: const_reverse_iterator rend() const { return const_reverse_iterator(begin());} size_type size_in_bytes() const { return size() * sizeof(T); } - size_type max_size() const { return size_type(-1) / sizeof(T); } + size_type max_size() const { + return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T)); + } size_t capacity_in_bytes() const { return capacity() * sizeof(T); } @@ -173,9 +200,17 @@ public: } }; -/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put method -/// implementations that are designed to work with non-POD-like T's. -template <typename T, bool = is_trivially_copyable<T>::value> +/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put +/// method implementations that are designed to work with non-trivial T's. 
+/// +/// We approximate is_trivially_copyable with trivial move/copy construction and +/// trivial destruction. While the standard doesn't specify that you're allowed +/// copy these types with memcpy, there is no way for the type to observe this. +/// This catches the important case of std::pair<POD, POD>, which is not +/// trivially assignable. +template <typename T, bool = (is_trivially_copy_constructible<T>::value) && + (is_trivially_move_constructible<T>::value) && + std::is_trivially_destructible<T>::value> class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> { protected: SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} @@ -231,12 +266,21 @@ public: // Define this out-of-line to dissuade the C++ compiler from inlining it. template <typename T, bool TriviallyCopyable> void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) { - if (MinSize > UINT32_MAX) + // Ensure we can fit the new capacity. + // This is only going to be applicable when the capacity is 32 bit. + if (MinSize > this->SizeTypeMax()) report_bad_alloc_error("SmallVector capacity overflow during allocation"); + // Ensure we can meet the guarantee of space for at least one more element. + // The above check alone will not catch the case where grow is called with a + // default MinCapacity of 0, but the current capacity cannot be increased. + // This is only going to be applicable when the capacity is 32 bit. + if (this->capacity() == this->SizeTypeMax()) + report_bad_alloc_error("SmallVector capacity unable to grow"); + // Always grow, even from zero. size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2)); - NewCapacity = std::min(std::max(NewCapacity, MinSize), size_t(UINT32_MAX)); + NewCapacity = std::min(std::max(NewCapacity, MinSize), this->SizeTypeMax()); T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T))); // Move the elements over. 
@@ -254,7 +298,9 @@ void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) { } /// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put -/// method implementations that are designed to work with POD-like T's. +/// method implementations that are designed to work with trivially copyable +/// T's. This allows using memcpy in place of copy/move construction and +/// skipping destruction. template <typename T> class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> { protected: @@ -284,8 +330,8 @@ protected: template <typename T1, typename T2> static void uninitialized_copy( T1 *I, T1 *E, T2 *Dest, - typename std::enable_if<std::is_same<typename std::remove_const<T1>::type, - T2>::value>::type * = nullptr) { + std::enable_if_t<std::is_same<typename std::remove_const<T1>::type, + T2>::value> * = nullptr) { // Use memcpy for PODs iterated by pointers (which includes SmallVector // iterators): std::uninitialized_copy optimizes to memmove, but we can // use memcpy here. Note that I and E are iterators and thus might be @@ -381,9 +427,9 @@ public: /// Add the specified range to the end of the SmallVector. 
template <typename in_iter, - typename = typename std::enable_if<std::is_convertible< + typename = std::enable_if_t<std::is_convertible< typename std::iterator_traits<in_iter>::iterator_category, - std::input_iterator_tag>::value>::type> + std::input_iterator_tag>::value>> void append(in_iter in_start, in_iter in_end) { size_type NumInputs = std::distance(in_start, in_end); if (NumInputs > this->capacity() - this->size()) @@ -418,9 +464,9 @@ public: } template <typename in_iter, - typename = typename std::enable_if<std::is_convertible< + typename = std::enable_if_t<std::is_convertible< typename std::iterator_traits<in_iter>::iterator_category, - std::input_iterator_tag>::value>::type> + std::input_iterator_tag>::value>> void assign(in_iter in_start, in_iter in_end) { clear(); append(in_start, in_end); @@ -575,9 +621,9 @@ public: } template <typename ItTy, - typename = typename std::enable_if<std::is_convertible< + typename = std::enable_if_t<std::is_convertible< typename std::iterator_traits<ItTy>::iterator_category, - std::input_iterator_tag>::value>::type> + std::input_iterator_tag>::value>> iterator insert(iterator I, ItTy From, ItTy To) { // Convert iterator to elt# to avoid invalidating iterator when we reserve() size_t InsertElt = I - this->begin(); @@ -834,7 +880,8 @@ template <typename T> struct alignas(alignof(T)) SmallVectorStorage<T, 0> {}; /// Note that this does not attempt to be exception safe. 
/// template <typename T, unsigned N> -class SmallVector : public SmallVectorImpl<T>, SmallVectorStorage<T, N> { +class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>, + SmallVectorStorage<T, N> { public: SmallVector() : SmallVectorImpl<T>(N) {} @@ -849,9 +896,9 @@ public: } template <typename ItTy, - typename = typename std::enable_if<std::is_convertible< + typename = std::enable_if_t<std::is_convertible< typename std::iterator_traits<ItTy>::iterator_category, - std::input_iterator_tag>::value>::type> + std::input_iterator_tag>::value>> SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) { this->append(S, E); } diff --git a/llvm/include/llvm/ADT/SparseMultiSet.h b/llvm/include/llvm/ADT/SparseMultiSet.h index d9d3ff459267..307d2c3f84e5 100644 --- a/llvm/include/llvm/ADT/SparseMultiSet.h +++ b/llvm/include/llvm/ADT/SparseMultiSet.h @@ -94,7 +94,7 @@ class SparseMultiSet { /// tombstones, in which case they are actually nodes in a single-linked /// freelist of recyclable slots. struct SMSNode { - static const unsigned INVALID = ~0U; + static constexpr unsigned INVALID = ~0U; ValueT Data; unsigned Prev; diff --git a/llvm/include/llvm/ADT/SparseSet.h b/llvm/include/llvm/ADT/SparseSet.h index a6eb9b942e80..74457d5fd679 100644 --- a/llvm/include/llvm/ADT/SparseSet.h +++ b/llvm/include/llvm/ADT/SparseSet.h @@ -21,7 +21,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/Support/Allocator.h" +#include "llvm/Support/AllocatorBase.h" #include <cassert> #include <cstdint> #include <cstdlib> @@ -79,7 +79,7 @@ struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> { } }; -/// SparseSet - Fast set implmentation for objects that can be identified by +/// SparseSet - Fast set implementation for objects that can be identified by /// small unsigned keys. 
/// /// SparseSet allocates memory proportional to the size of the key universe, so diff --git a/llvm/include/llvm/ADT/StringExtras.h b/llvm/include/llvm/ADT/StringExtras.h index ef1a11e0619b..990a3054a9d2 100644 --- a/llvm/include/llvm/ADT/StringExtras.h +++ b/llvm/include/llvm/ADT/StringExtras.h @@ -107,6 +107,14 @@ inline bool isPrint(char C) { return (0x20 <= UC) && (UC <= 0x7E); } +/// Checks whether character \p C is whitespace in the "C" locale. +/// +/// Locale-independent version of the C standard library isspace. +inline bool isSpace(char C) { + return C == ' ' || C == '\f' || C == '\n' || C == '\r' || C == '\t' || + C == '\v'; +} + /// Returns the corresponding lowercase character if \p x is uppercase. inline char toLower(char x) { if (x >= 'A' && x <= 'Z') @@ -237,7 +245,7 @@ inline std::string utostr(uint64_t X, bool isNeg = false) { inline std::string itostr(int64_t X) { if (X < 0) - return utostr(static_cast<uint64_t>(-X), true); + return utostr(-static_cast<uint64_t>(X), true); else return utostr(static_cast<uint64_t>(X)); } @@ -292,6 +300,18 @@ void printHTMLEscaped(StringRef String, raw_ostream &Out); /// printLowerCase - Print each character as lowercase if it is uppercase. void printLowerCase(StringRef String, raw_ostream &Out); +/// Converts a string from camel-case to snake-case by replacing all uppercase +/// letters with '_' followed by the letter in lowercase, except if the +/// uppercase letter is the first character of the string. +std::string convertToSnakeFromCamelCase(StringRef input); + +/// Converts a string from snake-case to camel-case by replacing all occurrences +/// of '_' followed by a lowercase letter with the letter in uppercase. 
+/// Optionally allow capitalization of the first letter (if it is a lowercase +/// letter) +std::string convertToCamelFromSnakeCase(StringRef input, + bool capitalizeFirst = false); + namespace detail { template <typename IteratorT> diff --git a/llvm/include/llvm/ADT/StringMap.h b/llvm/include/llvm/ADT/StringMap.h index 108185bd07b9..840f328db796 100644 --- a/llvm/include/llvm/ADT/StringMap.h +++ b/llvm/include/llvm/ADT/StringMap.h @@ -13,36 +13,17 @@ #ifndef LLVM_ADT_STRINGMAP_H #define LLVM_ADT_STRINGMAP_H -#include "llvm/ADT/StringRef.h" -#include "llvm/ADT/iterator.h" -#include "llvm/ADT/iterator_range.h" -#include "llvm/Support/Allocator.h" +#include "llvm/ADT/StringMapEntry.h" +#include "llvm/Support/AllocatorBase.h" #include "llvm/Support/PointerLikeTypeTraits.h" -#include "llvm/Support/ErrorHandling.h" -#include <algorithm> -#include <cassert> -#include <cstdint> -#include <cstdlib> -#include <cstring> #include <initializer_list> #include <iterator> -#include <utility> namespace llvm { -template<typename ValueTy> class StringMapConstIterator; -template<typename ValueTy> class StringMapIterator; -template<typename ValueTy> class StringMapKeyIterator; - -/// StringMapEntryBase - Shared base class of StringMapEntry instances. -class StringMapEntryBase { - size_t StrLen; - -public: - explicit StringMapEntryBase(size_t Len) : StrLen(Len) {} - - size_t getKeyLength() const { return StrLen; } -}; +template <typename ValueTy> class StringMapConstIterator; +template <typename ValueTy> class StringMapIterator; +template <typename ValueTy> class StringMapKeyIterator; /// StringMapImpl - This is the base class of StringMap that is shared among /// all of its instantiations. 
@@ -58,8 +39,7 @@ protected: unsigned ItemSize; protected: - explicit StringMapImpl(unsigned itemSize) - : ItemSize(itemSize) {} + explicit StringMapImpl(unsigned itemSize) : ItemSize(itemSize) {} StringMapImpl(StringMapImpl &&RHS) : TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets), NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones), @@ -118,127 +98,11 @@ public: } }; -/// StringMapEntryStorage - Holds the value in a StringMapEntry. -/// -/// Factored out into a separate base class to make it easier to specialize. -/// This is primarily intended to support StringSet, which doesn't need a value -/// stored at all. -template<typename ValueTy> -class StringMapEntryStorage : public StringMapEntryBase { -public: - ValueTy second; - - explicit StringMapEntryStorage(size_t strLen) - : StringMapEntryBase(strLen), second() {} - template <typename... InitTy> - StringMapEntryStorage(size_t strLen, InitTy &&... InitVals) - : StringMapEntryBase(strLen), second(std::forward<InitTy>(InitVals)...) {} - StringMapEntryStorage(StringMapEntryStorage &E) = delete; - - const ValueTy &getValue() const { return second; } - ValueTy &getValue() { return second; } - - void setValue(const ValueTy &V) { second = V; } -}; - -template<> -class StringMapEntryStorage<NoneType> : public StringMapEntryBase { -public: - explicit StringMapEntryStorage(size_t strLen, NoneType none = None) - : StringMapEntryBase(strLen) {} - StringMapEntryStorage(StringMapEntryStorage &E) = delete; - - NoneType getValue() const { return None; } -}; - -/// StringMapEntry - This is used to represent one value that is inserted into -/// a StringMap. It contains the Value itself and the key: the string length -/// and data. 
-template<typename ValueTy> -class StringMapEntry final : public StringMapEntryStorage<ValueTy> { -public: - using StringMapEntryStorage<ValueTy>::StringMapEntryStorage; - - StringRef getKey() const { - return StringRef(getKeyData(), this->getKeyLength()); - } - - /// getKeyData - Return the start of the string data that is the key for this - /// value. The string data is always stored immediately after the - /// StringMapEntry object. - const char *getKeyData() const {return reinterpret_cast<const char*>(this+1);} - - StringRef first() const { - return StringRef(getKeyData(), this->getKeyLength()); - } - - /// Create a StringMapEntry for the specified key construct the value using - /// \p InitiVals. - template <typename AllocatorTy, typename... InitTy> - static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator, - InitTy &&... InitVals) { - size_t KeyLength = Key.size(); - - // Allocate a new item with space for the string at the end and a null - // terminator. - size_t AllocSize = sizeof(StringMapEntry) + KeyLength + 1; - size_t Alignment = alignof(StringMapEntry); - - StringMapEntry *NewItem = - static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment)); - assert(NewItem && "Unhandled out-of-memory"); - - // Construct the value. - new (NewItem) StringMapEntry(KeyLength, std::forward<InitTy>(InitVals)...); - - // Copy the string information. - char *StrBuffer = const_cast<char*>(NewItem->getKeyData()); - if (KeyLength > 0) - memcpy(StrBuffer, Key.data(), KeyLength); - StrBuffer[KeyLength] = 0; // Null terminate for convenience of clients. - return NewItem; - } - - /// Create - Create a StringMapEntry with normal malloc/free. - template <typename... InitType> - static StringMapEntry *Create(StringRef Key, InitType &&... 
InitVal) { - MallocAllocator A; - return Create(Key, A, std::forward<InitType>(InitVal)...); - } - - static StringMapEntry *Create(StringRef Key) { - return Create(Key, ValueTy()); - } - - /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded - /// into a StringMapEntry, return the StringMapEntry itself. - static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) { - char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>); - return *reinterpret_cast<StringMapEntry*>(Ptr); - } - - /// Destroy - Destroy this StringMapEntry, releasing memory back to the - /// specified allocator. - template<typename AllocatorTy> - void Destroy(AllocatorTy &Allocator) { - // Free memory referenced by the item. - size_t AllocSize = sizeof(StringMapEntry) + this->getKeyLength() + 1; - this->~StringMapEntry(); - Allocator.Deallocate(static_cast<void *>(this), AllocSize); - } - - /// Destroy this object, releasing memory back to the malloc allocator. - void Destroy() { - MallocAllocator A; - Destroy(A); - } -}; - /// StringMap - This is an unconventional map that is specialized for handling /// keys that are "strings", which are basically ranges of bytes. This does some /// funky memory allocation and hashing things to make it extremely efficient, /// storing the string data *after* the value in the map. 
-template<typename ValueTy, typename AllocatorTy = MallocAllocator> +template <typename ValueTy, typename AllocatorTy = MallocAllocator> class StringMap : public StringMapImpl { AllocatorTy Allocator; @@ -248,14 +112,15 @@ public: StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {} explicit StringMap(unsigned InitialSize) - : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {} + : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {} explicit StringMap(AllocatorTy A) - : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {} + : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) { + } StringMap(unsigned InitialSize, AllocatorTy A) - : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))), - Allocator(A) {} + : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))), + Allocator(A) {} StringMap(std::initializer_list<std::pair<StringRef, ValueTy>> List) : StringMapImpl(List.size(), static_cast<unsigned>(sizeof(MapEntryTy))) { @@ -267,9 +132,9 @@ public: StringMap(StringMap &&RHS) : StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {} - StringMap(const StringMap &RHS) : - StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), - Allocator(RHS.Allocator) { + StringMap(const StringMap &RHS) + : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), + Allocator(RHS.Allocator) { if (RHS.empty()) return; @@ -316,7 +181,7 @@ public: for (unsigned I = 0, E = NumBuckets; I != E; ++I) { StringMapEntryBase *Bucket = TheTable[I]; if (Bucket && Bucket != getTombstoneVal()) { - static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator); + static_cast<MapEntryTy *>(Bucket)->Destroy(Allocator); } } } @@ -326,7 +191,7 @@ public: AllocatorTy &getAllocator() { return Allocator; } const AllocatorTy &getAllocator() const { return Allocator; } - using key_type = const char*; + using key_type = const char *; using mapped_type = ValueTy; using 
value_type = StringMapEntry<ValueTy>; using size_type = size_t; @@ -334,17 +199,13 @@ public: using const_iterator = StringMapConstIterator<ValueTy>; using iterator = StringMapIterator<ValueTy>; - iterator begin() { - return iterator(TheTable, NumBuckets == 0); - } - iterator end() { - return iterator(TheTable+NumBuckets, true); - } + iterator begin() { return iterator(TheTable, NumBuckets == 0); } + iterator end() { return iterator(TheTable + NumBuckets, true); } const_iterator begin() const { return const_iterator(TheTable, NumBuckets == 0); } const_iterator end() const { - return const_iterator(TheTable+NumBuckets, true); + return const_iterator(TheTable + NumBuckets, true); } iterator_range<StringMapKeyIterator<ValueTy>> keys() const { @@ -354,14 +215,16 @@ public: iterator find(StringRef Key) { int Bucket = FindKey(Key); - if (Bucket == -1) return end(); - return iterator(TheTable+Bucket, true); + if (Bucket == -1) + return end(); + return iterator(TheTable + Bucket, true); } const_iterator find(StringRef Key) const { int Bucket = FindKey(Key); - if (Bucket == -1) return end(); - return const_iterator(TheTable+Bucket, true); + if (Bucket == -1) + return end(); + return const_iterator(TheTable + Bucket, true); } /// lookup - Return the entry for the specified key, or a default @@ -378,15 +241,33 @@ public: ValueTy &operator[](StringRef Key) { return try_emplace(Key).first->second; } /// count - Return 1 if the element is in the map, 0 otherwise. - size_type count(StringRef Key) const { - return find(Key) == end() ? 0 : 1; - } + size_type count(StringRef Key) const { return find(Key) == end() ? 0 : 1; } template <typename InputTy> size_type count(const StringMapEntry<InputTy> &MapEntry) const { return count(MapEntry.getKey()); } + /// equal - check whether both of the containers are equal. 
+ bool operator==(const StringMap &RHS) const { + if (size() != RHS.size()) + return false; + + for (const auto &KeyValue : *this) { + auto FindInRHS = RHS.find(KeyValue.getKey()); + + if (FindInRHS == RHS.end()) + return false; + + if (!(KeyValue.getValue() == FindInRHS->getValue())) + return false; + } + + return true; + } + + bool operator!=(const StringMap &RHS) const { return !(*this == RHS); } + /// insert - Insert the specified key/value pair into the map. If the key /// already exists in the map, return false and ignore the request, otherwise /// insert it and return true. @@ -394,7 +275,7 @@ public: unsigned BucketNo = LookupBucketFor(KeyValue->getKey()); StringMapEntryBase *&Bucket = TheTable[BucketNo]; if (Bucket && Bucket != getTombstoneVal()) - return false; // Already exists in map. + return false; // Already exists in map. if (Bucket == getTombstoneVal()) --NumTombstones; @@ -448,14 +329,15 @@ public: // clear - Empties out the StringMap void clear() { - if (empty()) return; + if (empty()) + return; // Zap all values, resetting the keys back to non-present (not tombstone), // which is safe because we're removing all elements. for (unsigned I = 0, E = NumBuckets; I != E; ++I) { StringMapEntryBase *&Bucket = TheTable[I]; if (Bucket && Bucket != getTombstoneVal()) { - static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator); + static_cast<MapEntryTy *>(Bucket)->Destroy(Allocator); } Bucket = nullptr; } @@ -466,9 +348,7 @@ public: /// remove - Remove the specified key/value pair from the map, but do not /// erase it. This aborts if the key is not in the map. 
- void remove(MapEntryTy *KeyValue) { - RemoveKey(KeyValue); - } + void remove(MapEntryTy *KeyValue) { RemoveKey(KeyValue); } void erase(iterator I) { MapEntryTy &V = *I; @@ -478,7 +358,8 @@ public: bool erase(StringRef Key) { iterator I = find(Key); - if (I == end()) return false; + if (I == end()) + return false; erase(I); return true; } @@ -497,7 +378,8 @@ public: explicit StringMapIterBase(StringMapEntryBase **Bucket, bool NoAdvance = false) : Ptr(Bucket) { - if (!NoAdvance) AdvancePastEmptyBuckets(); + if (!NoAdvance) + AdvancePastEmptyBuckets(); } DerivedTy &operator=(const DerivedTy &Other) { diff --git a/llvm/include/llvm/ADT/StringMapEntry.h b/llvm/include/llvm/ADT/StringMapEntry.h new file mode 100644 index 000000000000..ea3aad6f1cb1 --- /dev/null +++ b/llvm/include/llvm/ADT/StringMapEntry.h @@ -0,0 +1,135 @@ +//===- StringMapEntry.h - String Hash table map interface -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the StringMapEntry class - it is intended to be a low +// dependency implementation detail of StringMap that is more suitable for +// inclusion in public headers than StringMap.h itself is. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_STRINGMAPENTRY_H +#define LLVM_ADT_STRINGMAPENTRY_H + +#include "llvm/ADT/StringRef.h" + +namespace llvm { + +/// StringMapEntryBase - Shared base class of StringMapEntry instances. +class StringMapEntryBase { + size_t keyLength; + +public: + explicit StringMapEntryBase(size_t keyLength) : keyLength(keyLength) {} + + size_t getKeyLength() const { return keyLength; } +}; + +/// StringMapEntryStorage - Holds the value in a StringMapEntry. 
+/// +/// Factored out into a separate base class to make it easier to specialize. +/// This is primarily intended to support StringSet, which doesn't need a value +/// stored at all. +template <typename ValueTy> +class StringMapEntryStorage : public StringMapEntryBase { +public: + ValueTy second; + + explicit StringMapEntryStorage(size_t keyLength) + : StringMapEntryBase(keyLength), second() {} + template <typename... InitTy> + StringMapEntryStorage(size_t keyLength, InitTy &&... initVals) + : StringMapEntryBase(keyLength), + second(std::forward<InitTy>(initVals)...) {} + StringMapEntryStorage(StringMapEntryStorage &e) = delete; + + const ValueTy &getValue() const { return second; } + ValueTy &getValue() { return second; } + + void setValue(const ValueTy &V) { second = V; } +}; + +template <> class StringMapEntryStorage<NoneType> : public StringMapEntryBase { +public: + explicit StringMapEntryStorage(size_t keyLength, NoneType none = None) + : StringMapEntryBase(keyLength) {} + StringMapEntryStorage(StringMapEntryStorage &entry) = delete; + + NoneType getValue() const { return None; } +}; + +/// StringMapEntry - This is used to represent one value that is inserted into +/// a StringMap. It contains the Value itself and the key: the string length +/// and data. +template <typename ValueTy> +class StringMapEntry final : public StringMapEntryStorage<ValueTy> { +public: + using StringMapEntryStorage<ValueTy>::StringMapEntryStorage; + + StringRef getKey() const { + return StringRef(getKeyData(), this->getKeyLength()); + } + + /// getKeyData - Return the start of the string data that is the key for this + /// value. The string data is always stored immediately after the + /// StringMapEntry object. 
+ const char *getKeyData() const { + return reinterpret_cast<const char *>(this + 1); + } + + StringRef first() const { + return StringRef(getKeyData(), this->getKeyLength()); + } + + /// Create a StringMapEntry for the specified key construct the value using + /// \p InitiVals. + template <typename AllocatorTy, typename... InitTy> + static StringMapEntry *Create(StringRef key, AllocatorTy &allocator, + InitTy &&... initVals) { + size_t keyLength = key.size(); + + // Allocate a new item with space for the string at the end and a null + // terminator. + size_t allocSize = sizeof(StringMapEntry) + keyLength + 1; + size_t alignment = alignof(StringMapEntry); + + StringMapEntry *newItem = + static_cast<StringMapEntry *>(allocator.Allocate(allocSize, alignment)); + assert(newItem && "Unhandled out-of-memory"); + + // Construct the value. + new (newItem) StringMapEntry(keyLength, std::forward<InitTy>(initVals)...); + + // Copy the string information. + char *strBuffer = const_cast<char *>(newItem->getKeyData()); + if (keyLength > 0) + memcpy(strBuffer, key.data(), keyLength); + strBuffer[keyLength] = 0; // Null terminate for convenience of clients. + return newItem; + } + + /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded + /// into a StringMapEntry, return the StringMapEntry itself. + static StringMapEntry &GetStringMapEntryFromKeyData(const char *keyData) { + char *ptr = const_cast<char *>(keyData) - sizeof(StringMapEntry<ValueTy>); + return *reinterpret_cast<StringMapEntry *>(ptr); + } + + /// Destroy - Destroy this StringMapEntry, releasing memory back to the + /// specified allocator. + template <typename AllocatorTy> void Destroy(AllocatorTy &allocator) { + // Free memory referenced by the item. 
+ size_t AllocSize = sizeof(StringMapEntry) + this->getKeyLength() + 1; + this->~StringMapEntry(); + allocator.Deallocate(static_cast<void *>(this), AllocSize, + alignof(StringMapEntry)); + } +}; + +} // end namespace llvm + +#endif // LLVM_ADT_STRINGMAPENTRY_H diff --git a/llvm/include/llvm/ADT/StringRef.h b/llvm/include/llvm/ADT/StringRef.h index 9bfaaccd953e..98c120fe2d2e 100644 --- a/llvm/include/llvm/ADT/StringRef.h +++ b/llvm/include/llvm/ADT/StringRef.h @@ -18,6 +18,9 @@ #include <cstring> #include <limits> #include <string> +#if __cplusplus > 201402L +#include <string_view> +#endif #include <type_traits> #include <utility> @@ -51,9 +54,9 @@ namespace llvm { /// situations where the character data resides in some other buffer, whose /// lifetime extends past that of the StringRef. For this reason, it is not in /// general safe to store a StringRef. - class StringRef { + class LLVM_GSL_POINTER StringRef { public: - static const size_t npos = ~size_t(0); + static constexpr size_t npos = ~size_t(0); using iterator = const char *; using const_iterator = const char *; @@ -77,7 +80,8 @@ namespace llvm { static constexpr size_t strLen(const char *Str) { #if __cplusplus > 201402L return std::char_traits<char>::length(Str); -#elif __has_builtin(__builtin_strlen) || defined(__GNUC__) || defined(_MSC_VER) +#elif __has_builtin(__builtin_strlen) || defined(__GNUC__) || \ + (defined(_MSC_VER) && _MSC_VER >= 1916) return __builtin_strlen(Str); #else const char *Begin = Str; @@ -110,6 +114,12 @@ namespace llvm { /*implicit*/ StringRef(const std::string &Str) : Data(Str.data()), Length(Str.length()) {} +#if __cplusplus > 201402L + /// Construct a string ref from an std::string_view. + /*implicit*/ constexpr StringRef(std::string_view Str) + : Data(Str.data()), Length(Str.size()) {} +#endif + static StringRef withNullAsEmpty(const char *data) { return StringRef(data ? 
data : ""); } @@ -255,17 +265,20 @@ namespace llvm { /// The declaration here is extra complicated so that `stringRef = {}` /// and `stringRef = "abc"` continue to select the move assignment operator. template <typename T> - typename std::enable_if<std::is_same<T, std::string>::value, - StringRef>::type & + std::enable_if_t<std::is_same<T, std::string>::value, StringRef> & operator=(T &&Str) = delete; /// @} /// @name Type Conversions /// @{ - operator std::string() const { - return str(); + explicit operator std::string() const { return str(); } + +#if __cplusplus > 201402L + operator std::string_view() const { + return std::string_view(data(), size()); } +#endif /// @} /// @name String Predicates @@ -494,7 +507,7 @@ namespace llvm { /// this returns true to signify the error. The string is considered /// erroneous if empty or if it overflows T. template <typename T> - typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type + std::enable_if_t<std::numeric_limits<T>::is_signed, bool> getAsInteger(unsigned Radix, T &Result) const { long long LLVal; if (getAsSignedInteger(*this, Radix, LLVal) || @@ -505,7 +518,7 @@ namespace llvm { } template <typename T> - typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type + std::enable_if_t<!std::numeric_limits<T>::is_signed, bool> getAsInteger(unsigned Radix, T &Result) const { unsigned long long ULLVal; // The additional cast to unsigned long long is required to avoid the @@ -528,7 +541,7 @@ namespace llvm { /// The portion of the string representing the discovered numeric value /// is removed from the beginning of the string. 
template <typename T> - typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type + std::enable_if_t<std::numeric_limits<T>::is_signed, bool> consumeInteger(unsigned Radix, T &Result) { long long LLVal; if (consumeSignedInteger(*this, Radix, LLVal) || @@ -539,7 +552,7 @@ namespace llvm { } template <typename T> - typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type + std::enable_if_t<!std::numeric_limits<T>::is_signed, bool> consumeInteger(unsigned Radix, T &Result) { unsigned long long ULLVal; if (consumeUnsignedInteger(*this, Radix, ULLVal) || diff --git a/llvm/include/llvm/ADT/StringSet.h b/llvm/include/llvm/ADT/StringSet.h index 60be09d3c326..63d929399a4e 100644 --- a/llvm/include/llvm/ADT/StringSet.h +++ b/llvm/include/llvm/ADT/StringSet.h @@ -1,4 +1,4 @@ -//===- StringSet.h - The LLVM Compiler Driver -------------------*- C++ -*-===// +//===- StringSet.h - An efficient set built on StringMap --------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -14,44 +14,38 @@ #define LLVM_ADT_STRINGSET_H #include "llvm/ADT/StringMap.h" -#include "llvm/ADT/StringRef.h" -#include "llvm/Support/Allocator.h" -#include <cassert> -#include <initializer_list> -#include <utility> namespace llvm { - /// StringSet - A wrapper for StringMap that provides set-like functionality. 
- template <class AllocatorTy = MallocAllocator> - class StringSet : public StringMap<NoneType, AllocatorTy> { - using base = StringMap<NoneType, AllocatorTy>; - - public: - StringSet() = default; - StringSet(std::initializer_list<StringRef> S) { - for (StringRef X : S) - insert(X); - } - explicit StringSet(AllocatorTy A) : base(A) {} - - std::pair<typename base::iterator, bool> insert(StringRef Key) { - assert(!Key.empty()); - return base::insert(std::make_pair(Key, None)); - } - - template <typename InputIt> - void insert(const InputIt &Begin, const InputIt &End) { - for (auto It = Begin; It != End; ++It) - base::insert(std::make_pair(*It, None)); - } - - template <typename ValueTy> - std::pair<typename base::iterator, bool> - insert(const StringMapEntry<ValueTy> &MapEntry) { - return insert(MapEntry.getKey()); - } - }; +/// StringSet - A wrapper for StringMap that provides set-like functionality. +template <class AllocatorTy = MallocAllocator> +class StringSet : public StringMap<NoneType, AllocatorTy> { + using Base = StringMap<NoneType, AllocatorTy>; + +public: + StringSet() = default; + StringSet(std::initializer_list<StringRef> initializer) { + for (StringRef str : initializer) + insert(str); + } + explicit StringSet(AllocatorTy a) : Base(a) {} + + std::pair<typename Base::iterator, bool> insert(StringRef key) { + return Base::try_emplace(key); + } + + template <typename InputIt> + void insert(const InputIt &begin, const InputIt &end) { + for (auto it = begin; it != end; ++it) + insert(*it); + } + + template <typename ValueTy> + std::pair<typename Base::iterator, bool> + insert(const StringMapEntry<ValueTy> &mapEntry) { + return insert(mapEntry.getKey()); + } +}; } // end namespace llvm diff --git a/llvm/include/llvm/ADT/TinyPtrVector.h b/llvm/include/llvm/ADT/TinyPtrVector.h index 6b76d35d4e92..ed20a762f307 100644 --- a/llvm/include/llvm/ADT/TinyPtrVector.h +++ b/llvm/include/llvm/ADT/TinyPtrVector.h @@ -152,10 +152,10 @@ public: } // Implicit conversion to 
ArrayRef<U> if EltTy* implicitly converts to U*. - template<typename U, - typename std::enable_if< - std::is_convertible<ArrayRef<EltTy>, ArrayRef<U>>::value, - bool>::type = false> + template < + typename U, + std::enable_if_t<std::is_convertible<ArrayRef<EltTy>, ArrayRef<U>>::value, + bool> = false> operator ArrayRef<U>() const { return operator ArrayRef<EltTy>(); } diff --git a/llvm/include/llvm/ADT/Triple.h b/llvm/include/llvm/ADT/Triple.h index 76a754d671fb..6bad18f19244 100644 --- a/llvm/include/llvm/ADT/Triple.h +++ b/llvm/include/llvm/ADT/Triple.h @@ -19,6 +19,8 @@ namespace llvm { +class VersionTuple; + /// Triple - Helper class for working with autoconf configuration names. For /// historical reasons, we also call these 'triples' (they used to contain /// exactly three fields). @@ -101,6 +103,7 @@ public: enum SubArchType { NoSubArch, + ARMSubArch_v8_6a, ARMSubArch_v8_5a, ARMSubArch_v8_4a, ARMSubArch_v8_3a, @@ -437,17 +440,7 @@ public: /// compatibility, which handles supporting skewed version numbering schemes /// used by the "darwin" triples. bool isMacOSXVersionLT(unsigned Major, unsigned Minor = 0, - unsigned Micro = 0) const { - assert(isMacOSX() && "Not an OS X triple!"); - - // If this is OS X, expect a sane version number. - if (getOS() == Triple::MacOSX) - return isOSVersionLT(Major, Minor, Micro); - - // Otherwise, compare to the "Darwin" number. - assert(Major == 10 && "Unexpected major version"); - return isOSVersionLT(Minor + 4, Micro, 0); - } + unsigned Micro = 0) const; /// isMacOSX - Is this a Mac OS X triple. For legacy reasons, we support both /// "darwin" and "osx" as OS X triples. 
@@ -691,6 +684,13 @@ public: return getArch() == Triple::nvptx || getArch() == Triple::nvptx64; } + /// Tests whether the target is AMDGCN + bool isAMDGCN() const { return getArch() == Triple::amdgcn; } + + bool isAMDGPU() const { + return getArch() == Triple::r600 || getArch() == Triple::amdgcn; + } + /// Tests whether the target is Thumb (little and big endian). bool isThumb() const { return getArch() == Triple::thumb || getArch() == Triple::thumbeb; @@ -731,6 +731,11 @@ public: return getArch() == Triple::riscv32 || getArch() == Triple::riscv64; } + /// Tests whether the target is SystemZ. + bool isSystemZ() const { + return getArch() == Triple::systemz; + } + /// Tests whether the target is x86 (32- or 64-bit). bool isX86() const { return getArch() == Triple::x86 || getArch() == Triple::x86_64; @@ -741,9 +746,14 @@ public: return getArch() == Triple::ve; } + /// Tests whether the target is wasm (32- and 64-bit). + bool isWasm() const { + return getArch() == Triple::wasm32 || getArch() == Triple::wasm64; + } + /// Tests whether the target supports comdat bool supportsCOMDAT() const { - return !isOSBinFormatMachO(); + return !(isOSBinFormatMachO() || isOSBinFormatXCOFF()); } /// Tests whether the target uses emulated TLS as default. @@ -850,6 +860,12 @@ public: /// Merge target triples. std::string merge(const Triple &Other) const; + /// Some platforms have different minimum supported OS versions that + /// vary by the architecture specified in the triple. This function + /// returns the minimum supported OS version for this triple if one exists, + /// or an invalid version tuple if this triple doesn't have one. + VersionTuple getMinimumSupportedOSVersion() const; + /// @} /// @name Static helpers for IDs. /// @{ @@ -884,6 +900,10 @@ public: static ArchType getArchTypeForLLVMName(StringRef Str); /// @} + + /// Returns a canonicalized OS version number for the specified OS.
+ static VersionTuple getCanonicalVersionForOS(OSType OSKind, + const VersionTuple &Version); }; } // End llvm namespace diff --git a/llvm/include/llvm/ADT/Twine.h b/llvm/include/llvm/ADT/Twine.h index 2dc7486c924f..4140c22aad3d 100644 --- a/llvm/include/llvm/ADT/Twine.h +++ b/llvm/include/llvm/ADT/Twine.h @@ -153,11 +153,11 @@ namespace llvm { /// LHS - The prefix in the concatenation, which may be uninitialized for /// Null or Empty kinds. - Child LHS = {0}; + Child LHS; /// RHS - The suffix in the concatenation, which may be uninitialized for /// Null or Empty kinds. - Child RHS = {0}; + Child RHS; /// LHSKind - The NodeKind of the left hand side, \see getLHSKind(). NodeKind LHSKind = EmptyKind; diff --git a/llvm/include/llvm/ADT/TypeSwitch.h b/llvm/include/llvm/ADT/TypeSwitch.h new file mode 100644 index 000000000000..bfcb2064301d --- /dev/null +++ b/llvm/include/llvm/ADT/TypeSwitch.h @@ -0,0 +1,176 @@ +//===- TypeSwitch.h - Switch functionality for RTTI casting -*- C++ -*-----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the TypeSwitch template, which mimics a switch() +// statement whose cases are type names. +// +//===-----------------------------------------------------------------------===/ + +#ifndef LLVM_ADT_TYPESWITCH_H +#define LLVM_ADT_TYPESWITCH_H + +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/Casting.h" + +namespace llvm { +namespace detail { + +template <typename DerivedT, typename T> class TypeSwitchBase { +public: + TypeSwitchBase(const T &value) : value(value) {} + TypeSwitchBase(TypeSwitchBase &&other) : value(other.value) {} + ~TypeSwitchBase() = default; + + /// TypeSwitchBase is not copyable. 
+ TypeSwitchBase(const TypeSwitchBase &) = delete; + void operator=(const TypeSwitchBase &) = delete; + void operator=(TypeSwitchBase &&other) = delete; + + /// Invoke a case on the derived class with multiple case types. + template <typename CaseT, typename CaseT2, typename... CaseTs, + typename CallableT> + DerivedT &Case(CallableT &&caseFn) { + DerivedT &derived = static_cast<DerivedT &>(*this); + return derived.template Case<CaseT>(caseFn) + .template Case<CaseT2, CaseTs...>(caseFn); + } + + /// Invoke a case on the derived class, inferring the type of the Case from + /// the first input of the given callable. + /// Note: This inference rules for this overload are very simple: strip + /// pointers and references. + template <typename CallableT> DerivedT &Case(CallableT &&caseFn) { + using Traits = function_traits<std::decay_t<CallableT>>; + using CaseT = std::remove_cv_t<std::remove_pointer_t< + std::remove_reference_t<typename Traits::template arg_t<0>>>>; + + DerivedT &derived = static_cast<DerivedT &>(*this); + return derived.template Case<CaseT>(std::forward<CallableT>(caseFn)); + } + +protected: + /// Trait to check whether `ValueT` provides a 'dyn_cast' method with type + /// `CastT`. + template <typename ValueT, typename CastT> + using has_dyn_cast_t = + decltype(std::declval<ValueT &>().template dyn_cast<CastT>()); + + /// Attempt to dyn_cast the given `value` to `CastT`. This overload is + /// selected if `value` already has a suitable dyn_cast method. + template <typename CastT, typename ValueT> + static auto castValue( + ValueT value, + typename std::enable_if_t< + is_detected<has_dyn_cast_t, ValueT, CastT>::value> * = nullptr) { + return value.template dyn_cast<CastT>(); + } + + /// Attempt to dyn_cast the given `value` to `CastT`. This overload is + /// selected if llvm::dyn_cast should be used. 
+ template <typename CastT, typename ValueT> + static auto castValue( + ValueT value, + typename std::enable_if_t< + !is_detected<has_dyn_cast_t, ValueT, CastT>::value> * = nullptr) { + return dyn_cast<CastT>(value); + } + + /// The root value we are switching on. + const T value; +}; +} // end namespace detail + +/// This class implements a switch-like dispatch statement for a value of 'T' +/// using dyn_cast functionality. Each `Case<T>` takes a callable to be invoked +/// if the root value isa<T>, the callable is invoked with the result of +/// dyn_cast<T>() as a parameter. +/// +/// Example: +/// Operation *op = ...; +/// LogicalResult result = TypeSwitch<Operation *, LogicalResult>(op) +/// .Case<ConstantOp>([](ConstantOp op) { ... }) +/// .Default([](Operation *op) { ... }); +/// +template <typename T, typename ResultT = void> +class TypeSwitch : public detail::TypeSwitchBase<TypeSwitch<T, ResultT>, T> { +public: + using BaseT = detail::TypeSwitchBase<TypeSwitch<T, ResultT>, T>; + using BaseT::BaseT; + using BaseT::Case; + TypeSwitch(TypeSwitch &&other) = default; + + /// Add a case on the given type. + template <typename CaseT, typename CallableT> + TypeSwitch<T, ResultT> &Case(CallableT &&caseFn) { + if (result) + return *this; + + // Check to see if CaseT applies to 'value'. + if (auto caseValue = BaseT::template castValue<CaseT>(this->value)) + result = caseFn(caseValue); + return *this; + } + + /// As a default, invoke the given callable within the root value. + template <typename CallableT> + LLVM_NODISCARD ResultT Default(CallableT &&defaultFn) { + if (result) + return std::move(*result); + return defaultFn(this->value); + } + + LLVM_NODISCARD + operator ResultT() { + assert(result && "Fell off the end of a type-switch"); + return std::move(*result); + } + +private: + /// The pointer to the result of this switch statement, once known, + /// null before that. 
+ Optional<ResultT> result; +}; + +/// Specialization of TypeSwitch for void returning callables. +template <typename T> +class TypeSwitch<T, void> + : public detail::TypeSwitchBase<TypeSwitch<T, void>, T> { +public: + using BaseT = detail::TypeSwitchBase<TypeSwitch<T, void>, T>; + using BaseT::BaseT; + using BaseT::Case; + TypeSwitch(TypeSwitch &&other) = default; + + /// Add a case on the given type. + template <typename CaseT, typename CallableT> + TypeSwitch<T, void> &Case(CallableT &&caseFn) { + if (foundMatch) + return *this; + + // Check to see if any of the types apply to 'value'. + if (auto caseValue = BaseT::template castValue<CaseT>(this->value)) { + caseFn(caseValue); + foundMatch = true; + } + return *this; + } + + /// As a default, invoke the given callable within the root value. + template <typename CallableT> void Default(CallableT &&defaultFn) { + if (!foundMatch) + defaultFn(this->value); + } + +private: + /// A flag detailing if we have already found a match. + bool foundMatch = false; +}; +} // end namespace llvm + +#endif // LLVM_ADT_TYPESWITCH_H diff --git a/llvm/include/llvm/ADT/Waymarking.h b/llvm/include/llvm/ADT/Waymarking.h new file mode 100644 index 000000000000..f00bc106938f --- /dev/null +++ b/llvm/include/llvm/ADT/Waymarking.h @@ -0,0 +1,325 @@ +//===- Waymarking.h - Array waymarking algorithm ----------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Utility to backtrace an array's head, from a pointer into it. For the +// backtrace to work, we use "Waymarks", which are special tags embedded into +// the array's elements. +// +// A Tag of n-bits (in size) is composed as follows: +// +// bits: | n-1 | n-2 ... 0 | +// .---------.------------------------------------. 
+// |Stop Mask|(2^(n-1))-ary numeric system - digit| +// '---------'------------------------------------' +// +// Backtracing is done as follows: +// Walk back (starting from a given pointer to an element into the array), until +// a tag with a "Stop Mask" is reached. Then start calculating the "Offset" from +// the array's head, by picking up digits along the way, until another stop is +// reached. The "Offset" is then subtracted from the current pointer, and the +// result is the array's head. +// A special case - if we first encounter a Tag with a Stop and a zero digit, +// then this is already the head. +// +// For example: +// In case of 2 bits: +// +// Tags: +// x0 - binary digit 0 +// x1 - binary digit 1 +// 1x - stop and calculate (s) +// +// Array: +// .---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---. +// head -> |s0 |s1 | 0 |s1 | 0 | 0 |s1 | 1 | 1 |s1 | 0 | 1 | 0 |s1 | 0 | 1 | +// '---'---'---'---'---'---'---'---'---'---'---'---'---'---'---'---' +// |-1 |-2 |-4 |-7 |-10 |-14 +// <_ | | | | | | +// <_____ | | | | | +// <_____________ | | | | +// <_________________________ | | | +// <_____________________________________ | | +// <_____________________________________________________ | +// +// +// In case of 3 bits: +// +// Tags: +// x00 - quaternary digit 0 +// x01 - quaternary digit 1 +// x10 - quaternary digit 2 +// x11 - quaternary digit 3 +// 1xy - stop and calculate (s) +// +// Array: +// .---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---. 
+// head -> |s0 |s1 |s2 |s3 | 0 |s1 | 2 |s1 | 0 |s2 | 2 |s2 | 0 |s3 | 2 |s3 | +// '---'---'---'---'---'---'---'---'---'---'---'---'---'---'---'---' +// |-1 |-2 |-3 |-4 |-6 |-8 |-10 |-12 |-14 |-16 +// <_ | | | | | | | | | | +// <_____ | | | | | | | | | +// <_________ | | | | | | | | +// <_____________ | | | | | | | +// <_____________________ | | | | | | +// <_____________________________ | | | | | +// <_____________________________________ | | | | +// <_____________________________________________ | | | +// <_____________________________________________________ | | +// <_____________________________________________________________ | +// +// +// The API introduce 2 functions: +// 1. fillWaymarks +// 2. followWaymarks +// +// Example: +// int N = 10; +// int M = 5; +// int **A = new int *[N + M]; // Define the array. +// for (int I = 0; I < N + M; ++I) +// A[I] = new int(I); +// +// fillWaymarks(A, A + N); // Set the waymarks for the first N elements +// // of the array. +// // Note that it must be done AFTER we fill +// // the array's elements. +// +// ... // Elements which are not in the range +// // [A, A+N) will not be marked, and we won't +// // be able to call followWaymarks on them. +// +// ... // Elements which will be changed after the +// // call to fillWaymarks, will have to be +// // retagged. +// +// fillWaymarks(A + N, A + N + M, N); // Set the waymarks of the remaining M +// // elements. +// ... +// int **It = A + N + 1; +// int **B = followWaymarks(It); // Find the head of the array containing It. +// assert(B == A); +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_WAYMARKING_H +#define LLVM_ADT_WAYMARKING_H + +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/PointerLikeTypeTraits.h" + +namespace llvm { + +namespace detail { + +template <unsigned NumBits> struct WaymarkingTraits { + enum : unsigned { + // The number of bits of a Waymarking Tag. 
+ NUM_BITS = NumBits, + + // A Tag is composed from a Mark and a Stop mask. + MARK_SIZE = NUM_BITS - 1, + STOP_MASK = (1 << MARK_SIZE), + MARK_MASK = (STOP_MASK - 1), + TAG_MASK = (MARK_MASK | STOP_MASK), + + // The number of pre-computed tags (for fast fill). + NUM_STATIC_TAGS = 32 + }; + +private: + // Add a new tag, calculated from Count and Stop, to the Vals pack, while + // continuing recursively to decrease Len down to 0. + template <unsigned Len, bool Stop, unsigned Count, uint8_t... Vals> + struct AddTag; + + // Delegate to the specialized AddTag according to the need of a Stop mask. + template <unsigned Len, unsigned Count, uint8_t... Vals> struct GenTag { + typedef + typename AddTag<Len, (Count <= MARK_MASK), Count, Vals...>::Xdata Xdata; + }; + + // Start adding tags while calculating the next Count, which is actually the + // number of already calculated tags (equivalent to the position in the + // array). + template <unsigned Len, uint8_t... Vals> struct GenOffset { + typedef typename GenTag<Len, sizeof...(Vals), Vals...>::Xdata Xdata; + }; + + // Add the tag and remove it from Count. + template <unsigned Len, unsigned Count, uint8_t... Vals> + struct AddTag<Len, false, Count, Vals...> { + typedef typename GenTag<Len - 1, (Count >> MARK_SIZE), Vals..., + Count & MARK_MASK>::Xdata Xdata; + }; + + // We have reached the end of this Count, so start with a new Count. + template <unsigned Len, unsigned Count, uint8_t... Vals> + struct AddTag<Len, true, Count, Vals...> { + typedef typename GenOffset<Len - 1, Vals..., + (Count & MARK_MASK) | STOP_MASK>::Xdata Xdata; + }; + + template <unsigned Count, uint8_t... Vals> struct TagsData { + // The remaining number for calculating the next tag, following the last one + // in Values. + static const unsigned Remain = Count; + + // The array of ordered pre-computed Tags. + static const uint8_t Values[sizeof...(Vals)]; + }; + + // Specialize the case when Len equals 0, as the recursion stop condition. 
+ template <unsigned Count, uint8_t... Vals> + struct AddTag<0, false, Count, Vals...> { + typedef TagsData<Count, Vals...> Xdata; + }; + + template <unsigned Count, uint8_t... Vals> + struct AddTag<0, true, Count, Vals...> { + typedef TagsData<Count, Vals...> Xdata; + }; + +public: + typedef typename GenOffset<NUM_STATIC_TAGS>::Xdata Tags; +}; + +template <unsigned NumBits> +template <unsigned Count, uint8_t... Vals> +const uint8_t WaymarkingTraits<NumBits>::TagsData< + Count, Vals...>::Values[sizeof...(Vals)] = {Vals...}; + +} // end namespace detail + +/// This class is responsible for tagging (and retrieving the tag of) a given +/// element of type T. +template <class T, class WTraits = detail::WaymarkingTraits< + PointerLikeTypeTraits<T>::NumLowBitsAvailable>> +struct Waymarker { + using Traits = WTraits; + static void setWaymark(T &N, unsigned Tag) { N.setWaymark(Tag); } + static unsigned getWaymark(const T &N) { return N.getWaymark(); } +}; + +template <class T, class WTraits> struct Waymarker<T *, WTraits> { + using Traits = WTraits; + static void setWaymark(T *&N, unsigned Tag) { + reinterpret_cast<uintptr_t &>(N) |= static_cast<uintptr_t>(Tag); + } + static unsigned getWaymark(const T *N) { + return static_cast<unsigned>(reinterpret_cast<uintptr_t>(N)) & + Traits::TAG_MASK; + } +}; + +/// Sets up the waymarking algorithm's tags for a given range [Begin, End). +/// +/// \param Begin The beginning of the range to mark with tags (inclusive). +/// \param End The ending of the range to mark with tags (exclusive). +/// \param Offset The position in the supposed tags array from which to start +/// marking the given range. 
+template <class TIter, class Marker = Waymarker< + typename std::iterator_traits<TIter>::value_type>> +void fillWaymarks(TIter Begin, TIter End, size_t Offset = 0) { + if (Begin == End) + return; + + size_t Count = Marker::Traits::Tags::Remain; + if (Offset <= Marker::Traits::NUM_STATIC_TAGS) { + // Start by filling the pre-calculated tags, starting from the given offset. + while (Offset != Marker::Traits::NUM_STATIC_TAGS) { + Marker::setWaymark(*Begin, Marker::Traits::Tags::Values[Offset]); + + ++Offset; + ++Begin; + + if (Begin == End) + return; + } + } else { + // The given offset is larger than the number of pre-computed tags, so we + // must do it the hard way. + // Calculate the next remaining Count, as if we have filled the tags up to + // the given offset. + size_t Off = Marker::Traits::NUM_STATIC_TAGS; + do { + ++Off; + + unsigned Tag = Count & Marker::Traits::MARK_MASK; + + // If the count can fit into the tag, then the counting must stop. + if (Count <= Marker::Traits::MARK_MASK) { + Tag |= Marker::Traits::STOP_MASK; + Count = Off; + } else + Count >>= Marker::Traits::MARK_SIZE; + } while (Off != Offset); + } + + // By now, we have the matching remaining Count for the current offset. + do { + ++Offset; + + unsigned Tag = Count & Marker::Traits::MARK_MASK; + + // If the count can fit into the tag, then the counting must stop. + if (Count <= Marker::Traits::MARK_MASK) { + Tag |= Marker::Traits::STOP_MASK; + Count = Offset; + } else + Count >>= Marker::Traits::MARK_SIZE; + + Marker::setWaymark(*Begin, Tag); + ++Begin; + } while (Begin != End); +} + +/// Sets up the waymarking algorithm's tags for a given range. +/// +/// \param Range The range to mark with tags. +/// \param Offset The position in the supposed tags array from which to start +/// marking the given range. 
+template <typename R, class Marker = Waymarker<typename std::remove_reference< + decltype(*std::begin(std::declval<R &>()))>::type>> +void fillWaymarks(R &&Range, size_t Offset = 0) { + return fillWaymarks<decltype(std::begin(std::declval<R &>())), Marker>( + adl_begin(Range), adl_end(Range), Offset); +} + +/// Retrieves the element marked with tag of only STOP_MASK, by following the +/// waymarks. This is the first element in a range passed to a previous call to +/// \c fillWaymarks with \c Offset 0. +/// +/// For the trivial usage of calling \c fillWaymarks(Array), and \I is an +/// iterator inside \c Array, this function retrieves the head of \c Array, by +/// following the waymarks. +/// +/// \param I The iterator into an array which was marked by the waymarking tags +/// (by a previous call to \c fillWaymarks). +template <class TIter, class Marker = Waymarker< + typename std::iterator_traits<TIter>::value_type>> +TIter followWaymarks(TIter I) { + unsigned Tag; + do + Tag = Marker::getWaymark(*I--); + while (!(Tag & Marker::Traits::STOP_MASK)); + + // Special case for the first Use. + if (Tag != Marker::Traits::STOP_MASK) { + ptrdiff_t Offset = Tag & Marker::Traits::MARK_MASK; + while (!((Tag = Marker::getWaymark(*I)) & Marker::Traits::STOP_MASK)) { + Offset = (Offset << Marker::Traits::MARK_SIZE) + Tag; + --I; + } + I -= Offset; + } + return ++I; +} + +} // end namespace llvm + +#endif // LLVM_ADT_WAYMARKING_H diff --git a/llvm/include/llvm/ADT/bit.h b/llvm/include/llvm/ADT/bit.h index a790d5ed2d21..d76bc6c6046c 100644 --- a/llvm/include/llvm/ADT/bit.h +++ b/llvm/include/llvm/ADT/bit.h @@ -22,23 +22,28 @@ namespace llvm { // This implementation of bit_cast is different from the C++17 one in two ways: // - It isn't constexpr because that requires compiler support. // - It requires trivially-constructible To, to avoid UB in the implementation. 
-template <typename To, typename From - , typename = typename std::enable_if<sizeof(To) == sizeof(From)>::type +template < + typename To, typename From, + typename = std::enable_if_t<sizeof(To) == sizeof(From)> #if (__has_feature(is_trivially_constructible) && defined(_LIBCPP_VERSION)) || \ (defined(__GNUC__) && __GNUC__ >= 5) - , typename = typename std::is_trivially_constructible<To>::type + , + typename = std::enable_if_t<std::is_trivially_constructible<To>::value> #elif __has_feature(is_trivially_constructible) - , typename = typename std::enable_if<__is_trivially_constructible(To)>::type + , + typename = std::enable_if_t<__is_trivially_constructible(To)> #else // See comment below. #endif #if (__has_feature(is_trivially_copyable) && defined(_LIBCPP_VERSION)) || \ (defined(__GNUC__) && __GNUC__ >= 5) - , typename = typename std::enable_if<std::is_trivially_copyable<To>::value>::type - , typename = typename std::enable_if<std::is_trivially_copyable<From>::value>::type + , + typename = std::enable_if_t<std::is_trivially_copyable<To>::value>, + typename = std::enable_if_t<std::is_trivially_copyable<From>::value> #elif __has_feature(is_trivially_copyable) - , typename = typename std::enable_if<__is_trivially_copyable(To)>::type - , typename = typename std::enable_if<__is_trivially_copyable(From)>::type + , + typename = std::enable_if_t<__is_trivially_copyable(To)>, + typename = std::enable_if_t<__is_trivially_copyable(From)> #else // This case is GCC 4.x. clang with libc++ or libstdc++ never get here. Unlike // llvm/Support/type_traits.h's is_trivially_copyable we don't want to @@ -46,7 +51,7 @@ template <typename To, typename From // compilation failures on the bots instead of locally. That's acceptable // because it's very few developers, and only until we move past C++11. 
#endif -> + > inline To bit_cast(const From &from) noexcept { To to; std::memcpy(&to, &from, sizeof(To)); diff --git a/llvm/include/llvm/ADT/fallible_iterator.h b/llvm/include/llvm/ADT/fallible_iterator.h index 6501ad2233cd..a196d8866b51 100644 --- a/llvm/include/llvm/ADT/fallible_iterator.h +++ b/llvm/include/llvm/ADT/fallible_iterator.h @@ -86,7 +86,7 @@ public: return fallible_iterator(std::move(I), &Err); } - /// Construct a fallible iteratro that can be used as an end-of-range value. + /// Construct a fallible iterator that can be used as an end-of-range value. /// /// A value created by this method can be dereferenced (if the underlying /// value points at a valid value) and compared, but not incremented or @@ -96,12 +96,10 @@ public: } /// Forward dereference to the underlying iterator. - auto operator*() -> decltype(*std::declval<Underlying>()) { return *I; } + decltype(auto) operator*() { return *I; } /// Forward const dereference to the underlying iterator. - auto operator*() const -> decltype(*std::declval<const Underlying>()) { - return *I; - } + decltype(auto) operator*() const { return *I; } /// Forward structure dereference to the underlying iterator (if the /// underlying iterator supports it). 
diff --git a/llvm/include/llvm/ADT/ilist.h b/llvm/include/llvm/ADT/ilist.h index 06c7abff965f..d5a1f286b177 100644 --- a/llvm/include/llvm/ADT/ilist.h +++ b/llvm/include/llvm/ADT/ilist.h @@ -198,10 +198,12 @@ public: iplist_impl &operator=(const iplist_impl &) = delete; iplist_impl(iplist_impl &&X) - : TraitsT(std::move(X)), IntrusiveListT(std::move(X)) {} + : TraitsT(std::move(static_cast<TraitsT &>(X))), + IntrusiveListT(std::move(static_cast<IntrusiveListT &>(X))) {} iplist_impl &operator=(iplist_impl &&X) { - *static_cast<TraitsT *>(this) = std::move(X); - *static_cast<IntrusiveListT *>(this) = std::move(X); + *static_cast<TraitsT *>(this) = std::move(static_cast<TraitsT &>(X)); + *static_cast<IntrusiveListT *>(this) = + std::move(static_cast<IntrusiveListT &>(X)); return *this; } diff --git a/llvm/include/llvm/ADT/ilist_iterator.h b/llvm/include/llvm/ADT/ilist_iterator.h index cbe5cefa96d1..be876347907b 100644 --- a/llvm/include/llvm/ADT/ilist_iterator.h +++ b/llvm/include/llvm/ADT/ilist_iterator.h @@ -88,15 +88,14 @@ public: // This is templated so that we can allow constructing a const iterator from // a nonconst iterator... template <bool RHSIsConst> - ilist_iterator( - const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS, - typename std::enable_if<IsConst || !RHSIsConst, void *>::type = nullptr) + ilist_iterator(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS, + std::enable_if_t<IsConst || !RHSIsConst, void *> = nullptr) : NodePtr(RHS.NodePtr) {} // This is templated so that we can allow assigning to a const iterator from // a nonconst iterator... 
template <bool RHSIsConst> - typename std::enable_if<IsConst || !RHSIsConst, ilist_iterator &>::type + std::enable_if_t<IsConst || !RHSIsConst, ilist_iterator &> operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) { NodePtr = RHS.NodePtr; return *this; diff --git a/llvm/include/llvm/ADT/iterator.h b/llvm/include/llvm/ADT/iterator.h index 8fd5c11a2dcb..9a1f6e1511e7 100644 --- a/llvm/include/llvm/ADT/iterator.h +++ b/llvm/include/llvm/ADT/iterator.h @@ -194,14 +194,14 @@ template < typename T = typename std::iterator_traits<WrappedIteratorT>::value_type, typename DifferenceTypeT = typename std::iterator_traits<WrappedIteratorT>::difference_type, - typename PointerT = typename std::conditional< + typename PointerT = std::conditional_t< std::is_same<T, typename std::iterator_traits< WrappedIteratorT>::value_type>::value, - typename std::iterator_traits<WrappedIteratorT>::pointer, T *>::type, - typename ReferenceT = typename std::conditional< + typename std::iterator_traits<WrappedIteratorT>::pointer, T *>, + typename ReferenceT = std::conditional_t< std::is_same<T, typename std::iterator_traits< WrappedIteratorT>::value_type>::value, - typename std::iterator_traits<WrappedIteratorT>::reference, T &>::type> + typename std::iterator_traits<WrappedIteratorT>::reference, T &>> class iterator_adaptor_base : public iterator_facade_base<DerivedT, IteratorCategoryT, T, DifferenceTypeT, PointerT, ReferenceT> { @@ -281,8 +281,8 @@ public: /// using iterator = pointee_iterator<SmallVectorImpl<T *>::iterator>; /// \endcode template <typename WrappedIteratorT, - typename T = typename std::remove_reference< - decltype(**std::declval<WrappedIteratorT>())>::type> + typename T = std::remove_reference_t<decltype( + **std::declval<WrappedIteratorT>())>> struct pointee_iterator : iterator_adaptor_base< pointee_iterator<WrappedIteratorT, T>, WrappedIteratorT, @@ -334,9 +334,11 @@ make_pointer_range(RangeT &&Range) { } template <typename WrappedIteratorT, - typename T1 = 
typename std::remove_reference<decltype(**std::declval<WrappedIteratorT>())>::type, - typename T2 = typename std::add_pointer<T1>::type> -using raw_pointer_iterator = pointer_iterator<pointee_iterator<WrappedIteratorT, T1>, T2>; + typename T1 = std::remove_reference_t<decltype( + **std::declval<WrappedIteratorT>())>, + typename T2 = std::add_pointer_t<T1>> +using raw_pointer_iterator = + pointer_iterator<pointee_iterator<WrappedIteratorT, T1>, T2>; // Wrapper iterator over iterator ItType, adding DataRef to the type of ItType, // to create NodeRef = std::pair<InnerTypeOfItType, DataRef>. |