path: root/lib/scudo/standalone/wrappers_c.inc
blob: a9adbc83588bac869bf5e4db992c21fce98ba187
//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PREFIX
#error "Define SCUDO_PREFIX prior to including this file!"
#endif
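
// SCUDO_PREFIX and SCUDO_ALLOCATOR are supplied by the translation unit that
// includes this file. As a rough illustration (see wrappers_c.cpp and
// wrappers_c_bionic.cpp for the actual definitions), the default C wrappers
// use something like:
//   #define SCUDO_PREFIX(name) name
//   #define SCUDO_ALLOCATOR Allocator
// while the Bionic wrappers prepend a "scudo_" prefix to the exported names.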

// Pointers returned by malloc-type functions have to be aligned to
// std::max_align_t. This is distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG),
// since C++ new-type functions do not have to abide by the same requirement.
#ifndef SCUDO_MALLOC_ALIGNMENT
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif

INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
  scudo::uptr Product;
  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportCallocOverflow(nmemb, size);
  }
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
}

INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
  SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}

INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
  struct SCUDO_MALLINFO Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
  // Total allocated space (bytes)
  Info.uordblks =
      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}

INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
}
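
// Bionic declares malloc_usable_size() as taking a const pointer, hence the
// SCUDO_ANDROID-conditional signature below.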

#if SCUDO_ANDROID
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
#else
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
#endif
  return SCUDO_ALLOCATOR.getUsableSize(ptr);
}

INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
  // Android rounds up the alignment to a power of two if it isn't one.
  if (SCUDO_ANDROID) {
    if (UNLIKELY(!alignment)) {
      alignment = 1U;
    } else {
      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
        alignment = scudo::roundUpToPowerOfTwo(alignment);
    }
  } else {
    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
      if (SCUDO_ALLOCATOR.canReturnNull()) {
        errno = EINVAL;
        return nullptr;
      }
      scudo::reportAlignmentNotPowerOfTwo(alignment);
    }
  }
  return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
                                  alignment);
}
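
// Per POSIX, posix_memalign() reports failure through its return value
// (EINVAL or ENOMEM) rather than through errno, and *memptr is only written
// on success.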

INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
                                                size_t size) {
  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
    if (!SCUDO_ALLOCATOR.canReturnNull())
      scudo::reportInvalidPosixMemalignAlignment(alignment);
    return EINVAL;
  }
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *memptr = Ptr;
  return 0;
}

INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportPvallocOverflow(size);
  }
  // pvalloc(0) should allocate one page.
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size ? scudo::roundUpTo(size, PageSize) : PageSize,
      scudo::Chunk::Origin::Memalign, PageSize));
}
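
// realloc(nullptr, size) behaves like malloc(size), and realloc(ptr, 0)
// deallocates ptr and returns nullptr; both cases are handled explicitly
// before deferring to the allocator's reallocate().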

INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
  if (!ptr)
    return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
  if (size == 0) {
    SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
    return nullptr;
  }
  return scudo::setErrnoOnNull(
      SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
}

INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
}

// Bionic wants a function named PREFIX_iterate rather than
// PREFIX_malloc_iterate, which is somewhat inconsistent with the rest; work
// around that.
#if SCUDO_ANDROID && _BIONIC
#define SCUDO_ITERATE iterate
#else
#define SCUDO_ITERATE malloc_iterate
#endif

INTERFACE WEAK int SCUDO_PREFIX(SCUDO_ITERATE)(
    uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
  SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
  return 0;
}

INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
  SCUDO_ALLOCATOR.disable();
}

INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
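
// M_DECAY_TIME and M_PURGE are Bionic extensions to mallopt(); here M_PURGE
// forces an immediate release of free memory back to the OS, while the decay
// time handling is still a TODO.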

INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
  if (param == M_DECAY_TIME) {
    // TODO(kostyak): set release_to_os_interval_ms accordingly.
    return 1;
  } else if (param == M_PURGE) {
    SCUDO_ALLOCATOR.releaseToOS();
    return 1;
  }
  return 0;
}
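
// C11 requires the aligned_alloc() size to be an integral multiple of the
// alignment; checkAlignedAllocAlignmentAndSize() flags combinations that do
// not satisfy the requirements.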

INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
                                                 size_t size) {
  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = EINVAL;
      return nullptr;
    }
    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
  }
  return scudo::setErrnoOnNull(
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
}

INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
  fputs("<malloc version=\"scudo-1\">", stream);
  fputs("</malloc>", stream);
  return 0;
}