author | Dimitry Andric <dim@FreeBSD.org> | 2015-05-27 18:44:32 +0000
---|---|---
committer | Dimitry Andric <dim@FreeBSD.org> | 2015-05-27 18:44:32 +0000
commit | 5a5ac124e1efaf208671f01c46edb15f29ed2a0b (patch) |
tree | a6140557876943cdd800ee997c9317283394b22c | /include/llvm/Transforms/IPO/LowerBitSets.h
parent | f03b5bed27d0d2eafd68562ce14f8b5e3f1f0801 (diff) |
Diffstat (limited to 'include/llvm/Transforms/IPO/LowerBitSets.h')
-rw-r--r-- | include/llvm/Transforms/IPO/LowerBitSets.h | 198
1 file changed, 198 insertions, 0 deletions
diff --git a/include/llvm/Transforms/IPO/LowerBitSets.h b/include/llvm/Transforms/IPO/LowerBitSets.h
new file mode 100644
index 000000000000..55d7d84560a0
--- /dev/null
+++ b/include/llvm/Transforms/IPO/LowerBitSets.h
@@ -0,0 +1,198 @@
+//===- LowerBitSets.h - Bitset lowering pass --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines parts of the bitset lowering pass implementation that may
+// be usefully unit tested.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_LOWERBITSETS_H
+#define LLVM_TRANSFORMS_IPO_LOWERBITSETS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+#include <stdint.h>
+#include <string.h> // for memset in ByteArrayBuilder
+#include <limits>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+class DataLayout;
+class GlobalVariable;
+class Value;
+
+struct BitSetInfo {
+  // The indices of the set bits in the bitset.
+  std::set<uint64_t> Bits;
+
+  // The byte offset into the combined global represented by the bitset.
+  uint64_t ByteOffset;
+
+  // The size of the bitset in bits.
+  uint64_t BitSize;
+
+  // Log2 alignment of the bit set relative to the combined global.
+  // For example, a log2 alignment of 3 means that bits in the bitset
+  // represent addresses 8 bytes apart.
+  unsigned AlignLog2;
+
+  bool isSingleOffset() const {
+    return Bits.size() == 1;
+  }
+
+  bool isAllOnes() const {
+    return Bits.size() == BitSize;
+  }
+
+  bool containsGlobalOffset(uint64_t Offset) const;
+
+  bool containsValue(const DataLayout &DL,
+                     const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout,
+                     Value *V, uint64_t COffset = 0) const;
+};
+
+struct BitSetBuilder {
+  SmallVector<uint64_t, 16> Offsets;
+  uint64_t Min, Max;
+
+  BitSetBuilder() : Min(std::numeric_limits<uint64_t>::max()), Max(0) {}
+
+  void addOffset(uint64_t Offset) {
+    if (Min > Offset)
+      Min = Offset;
+    if (Max < Offset)
+      Max = Offset;
+
+    Offsets.push_back(Offset);
+  }
+
+  BitSetInfo build();
+};
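build() is only declared above; its definition lands in lib/Transforms/IPO/LowerBitSets.cpp, which this commit does not touch. The self-contained sketch below illustrates the BitSetInfo fields the comments describe, under the assumption (illustrative only, not the pass's actual code) that build() normalizes each offset by Min and by the largest power-of-two stride common to all offsets:

```cpp
// Standalone illustration of the assumed build() normalization.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

int main() {
  // Byte offsets of three hypothetical globals inside the combined global.
  std::vector<uint64_t> Offsets = {8, 24, 40};

  // Mirrors BitSetBuilder::addOffset: track the extremes as offsets arrive.
  uint64_t Min = UINT64_MAX, Max = 0;
  for (uint64_t O : Offsets) {
    Min = std::min(Min, O);
    Max = std::max(Max, O);
  }

  // Assumed normalization: AlignLog2 is the largest power-of-two stride
  // dividing every (offset - Min); __builtin_ctzll is a GCC/Clang builtin.
  unsigned AlignLog2 = 0;
  bool First = true;
  for (uint64_t O : Offsets)
    if (O != Min) {
      unsigned Tz = (unsigned)__builtin_ctzll(O - Min);
      AlignLog2 = First ? Tz : std::min(AlignLog2, Tz);
      First = false;
    }

  std::set<uint64_t> Bits;
  for (uint64_t O : Offsets)
    Bits.insert((O - Min) >> AlignLog2);
  uint64_t BitSize = ((Max - Min) >> AlignLog2) + 1;

  // {8, 24, 40}: stride 16 => AlignLog2 = 4, Bits = {0, 1, 2}, BitSize = 3,
  // i.e. the resulting BitSetInfo would satisfy isAllOnes().
  std::printf("ByteOffset=%llu AlignLog2=%u BitSize=%llu\n",
              (unsigned long long)Min, AlignLog2,
              (unsigned long long)BitSize);
}
```

With offsets {8, 24, 40} the common stride is 16, so the bitset needs only 3 bits rather than 33, which is the point of the AlignLog2 field.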
+
+/// This class implements a layout algorithm for globals referenced by bit sets
+/// that tries to keep members of small bit sets together. This can
+/// significantly reduce bit set sizes in many cases.
+///
+/// It works by assembling fragments of layout from sets of referenced globals.
+/// Each set of referenced globals causes the algorithm to create a new
+/// fragment, which is assembled by appending each referenced global in the set
+/// into the fragment. If a referenced global has already been referenced by a
+/// fragment created earlier, we instead delete that fragment and append its
+/// contents into the fragment we are assembling.
+///
+/// By starting with the smallest fragments, we minimize the size of the
+/// fragments that are copied into larger fragments. This is most intuitively
+/// thought about when considering the case where the globals are virtual
+/// tables and the bit sets represent their derived classes: in a single
+/// inheritance hierarchy, the optimum layout would involve a depth-first
+/// search of the class hierarchy (and in fact the computed layout ends up
+/// looking a lot like a DFS), but a naive DFS would not work well in the
+/// presence of multiple inheritance. This aspect of the algorithm ends up
+/// fitting smaller hierarchies inside larger ones where that would be
+/// beneficial.
+///
+/// For example, consider this class hierarchy:
+///
+///   A     B
+///     \  /  |  \
+///      C    D   E
+///
+/// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and
+/// bsE (E). If we laid out our objects by DFS traversing B followed by A, our
+/// layout would be {B, C, D, E, A}. This is optimal for bsB as it needs to
+/// cover the only 4 objects in its hierarchy, but not for bsA as it needs to
+/// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows:
+///
+/// Add bsC, fragments {{C}}
+/// Add bsD, fragments {{C}, {D}}
+/// Add bsE, fragments {{C}, {D}, {E}}
+/// Add bsA, fragments {{A, C}, {D}, {E}}
+/// Add bsB, fragments {{B, A, C, D, E}}
+///
+/// This layout is optimal for bsA, as it now only needs to cover two (i.e. 3
+/// fewer) objects, at the cost of bsB needing to cover 1 more object.
+///
+/// The bit set lowering pass assigns an object index to each object that needs
+/// to be laid out, and calls addFragment for each bit set passing the object
+/// indices of its referenced globals. It then assembles a layout from the
+/// computed layout in the Fragments field.
+struct GlobalLayoutBuilder {
+  /// The computed layout. Each element of this vector contains a fragment of
+  /// layout (which may be empty) consisting of object indices.
+  std::vector<std::vector<uint64_t>> Fragments;
+
+  /// Mapping from object index to fragment index.
+  std::vector<uint64_t> FragmentMap;
+
+  GlobalLayoutBuilder(uint64_t NumObjects)
+      : Fragments(1), FragmentMap(NumObjects) {}
+
+  /// Add F to the layout while trying to keep its indices contiguous.
+  /// If a previously seen fragment uses any of F's indices, that
+  /// fragment will be laid out inside F.
+  void addFragment(const std::set<uint64_t> &F);
+};
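Since addFragment is only declared in this header, the self-contained sketch below re-creates the merge rule exactly as the doc comment describes it and replays the bsA/bsB worked example; the pass's real implementation is in LowerBitSets.cpp and may differ in detail:

```cpp
// Minimal re-creation of the fragment-merging rule described above.
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

struct LayoutSketch {
  std::vector<std::vector<uint64_t>> Fragments; // index 0 is a sentinel
  std::vector<uint64_t> FragmentMap;            // object index -> fragment

  explicit LayoutSketch(uint64_t NumObjects)
      : Fragments(1), FragmentMap(NumObjects) {}

  void addFragment(const std::set<uint64_t> &F) {
    uint64_t FragmentIndex = Fragments.size();
    Fragments.emplace_back();
    std::vector<uint64_t> &Fragment = Fragments.back();
    for (uint64_t ObjIndex : F) {
      uint64_t OldIndex = FragmentMap[ObjIndex];
      if (OldIndex == 0) {
        // First time we see this object: append it to the new fragment.
        Fragment.push_back(ObjIndex);
        FragmentMap[ObjIndex] = FragmentIndex;
      } else if (OldIndex != FragmentIndex) {
        // Object already lives in an earlier fragment: splice that whole
        // fragment in, keeping the earlier (smaller) bit set contiguous.
        for (uint64_t Moved : Fragments[OldIndex]) {
          Fragment.push_back(Moved);
          FragmentMap[Moved] = FragmentIndex;
        }
        Fragments[OldIndex].clear();
      }
    }
  }
};

int main() {
  enum { A, B, C, D, E, NumObjects };
  LayoutSketch LB(NumObjects);
  LB.addFragment({C});          // fragments {{C}}
  LB.addFragment({D});          // fragments {{C}, {D}}
  LB.addFragment({E});          // fragments {{C}, {D}, {E}}
  LB.addFragment({A, C});       // bsA swallows {C}: {{A, C}, {D}, {E}}
  LB.addFragment({B, C, D, E}); // bsB swallows the remaining fragments
  for (const std::vector<uint64_t> &Frag : LB.Fragments)
    for (uint64_t Obj : Frag)
      std::printf("%c ", "ABCDE"[Obj]); // prints: B A C D E
  std::printf("\n");
}
```

The output {B, A, C, D, E} matches the layout the doc comment derives, with bsA's two members kept adjacent inside bsB's range.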
+
+/// This class is used to build a byte array containing overlapping bit sets.
+/// By loading from indexed offsets into the byte array and applying a mask, a
+/// program can test bits from the bit set with a relatively short instruction
+/// sequence. For example, suppose we have 15 bit sets to lay out:
+///
+/// A (16 bits), B (15 bits), C (14 bits), D (13 bits), E (12 bits),
+/// F (11 bits), G (10 bits), H (9 bits), I (7 bits), J (6 bits), K (5 bits),
+/// L (4 bits), M (3 bits), N (2 bits), O (1 bit)
+///
+/// These bits can be laid out in a 16-byte array like this:
+///
+///       Byte Offset
+///     0123456789ABCDEF
+/// Bit
+///   7 HHHHHHHHHIIIIIII
+///   6 GGGGGGGGGGJJJJJJ
+///   5 FFFFFFFFFFFKKKKK
+///   4 EEEEEEEEEEEELLLL
+///   3 DDDDDDDDDDDDDMMM
+///   2 CCCCCCCCCCCCCCNN
+///   1 BBBBBBBBBBBBBBBO
+///   0 AAAAAAAAAAAAAAAA
+///
+/// For example, to test bit X of A, we evaluate ((bits[X] & 1) != 0), or to
+/// test bit X of I, we evaluate ((bits[9 + X] & 0x80) != 0). This can be done
+/// in 1-2 machine instructions on x86, or 4-6 instructions on ARM.
+///
+/// This is a byte array, rather than (say) a 2-byte array or a 4-byte array,
+/// because for one thing it gives us better packing (the more bins there are,
+/// the less evenly they will be filled), and for another, the instruction
+/// sequences can be slightly shorter, both on x86 and ARM.
+struct ByteArrayBuilder {
+  /// The byte array built so far.
+  std::vector<uint8_t> Bytes;
+
+  enum { BitsPerByte = 8 };
+
+  /// The number of bytes allocated so far for each of the bits.
+  uint64_t BitAllocs[BitsPerByte];
+
+  ByteArrayBuilder() {
+    memset(BitAllocs, 0, sizeof(BitAllocs));
+  }
+
+  /// Allocate BitSize bits in the byte array where Bits contains the bits to
+  /// set. AllocByteOffset is set to the offset within the byte array and
+  /// AllocMask is set to the bitmask for those bits. This uses the LPT (Longest
+  /// Processing Time) multiprocessor scheduling algorithm to lay out the bits
+  /// efficiently; the pass allocates bit sets in decreasing size order.
+  void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize,
+                uint64_t &AllocByteOffset, uint8_t &AllocMask);
+};
+
+} // namespace llvm
+
+#endif
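allocate() is likewise only declared in this header. The sketch below is one plausible reading of the LPT rule the comment names: each bit set, presented in decreasing size order, is assigned to whichever of the eight bit positions currently holds the fewest allocated bytes. Which row a given set lands in depends on tie-breaking, so the diagram above is only one possible outcome:

```cpp
// One plausible allocate() consistent with the comment; the real definition
// is in LowerBitSets.cpp, which this commit does not include.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <set>
#include <vector>

struct ByteArraySketch {
  std::vector<uint8_t> Bytes;
  uint64_t BitAllocs[8];
  ByteArraySketch() { memset(BitAllocs, 0, sizeof(BitAllocs)); }

  void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize,
                uint64_t &AllocByteOffset, uint8_t &AllocMask) {
    // LPT step: the next (longest remaining) job goes to the machine,
    // here the bit position, with the smallest load so far.
    unsigned Bit = 0;
    for (unsigned I = 1; I != 8; ++I)
      if (BitAllocs[I] < BitAllocs[Bit])
        Bit = I;

    AllocByteOffset = BitAllocs[Bit];
    BitAllocs[Bit] += BitSize;
    if (Bytes.size() < BitAllocs[Bit])
      Bytes.resize(BitAllocs[Bit]);

    AllocMask = uint8_t(1) << Bit;
    for (uint64_t B : Bits)
      Bytes[AllocByteOffset + B] |= AllocMask; // set bit B of this bit set
  }
};

int main() {
  ByteArraySketch BAB;
  uint64_t Off;
  uint8_t Mask;
  // A 16-bit all-ones set (like "A" in the diagram), then a 9-bit set ("H").
  std::set<uint64_t> A, H;
  for (uint64_t I = 0; I != 16; ++I) A.insert(I);
  for (uint64_t I = 0; I != 9; ++I) H.insert(I);
  BAB.allocate(A, 16, Off, Mask); // bit row 0, byte offset 0
  BAB.allocate(H, 9, Off, Mask);  // a different bit row, still offset 0
  // Membership test in the form the comment gives: bit X of H.
  uint64_t X = 3;
  std::printf("H contains %llu: %d\n", (unsigned long long)X,
              (BAB.Bytes[Off + X] & Mask) != 0);
}
```

LPT keeps the eight per-bit columns close to evenly filled, which is what lets the 15 example sets above share a single 16-byte array.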