| field | value | date |
|---|---|---|
| author | Dimitry Andric <dim@FreeBSD.org> | 2016-04-29 19:16:59 +0000 |
| committer | Dimitry Andric <dim@FreeBSD.org> | 2016-04-29 19:16:59 +0000 |
| commit | c94cceea9c2262c5b2ad5f215bb9a8ae48b02764 (patch) | |
| tree | dbb8174cbf6f1cc45b8de8031b18adb4030509a8 /subversion/libsvn_subr/cache-membuffer.c | |
| parent | dc5d469d6574e9fb03bdd793658bb371315b306a (diff) | |
Diffstat (limited to 'subversion/libsvn_subr/cache-membuffer.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | subversion/libsvn_subr/cache-membuffer.c | 37 |

1 file changed, 8 insertions(+), 29 deletions(-)
```diff
diff --git a/subversion/libsvn_subr/cache-membuffer.c b/subversion/libsvn_subr/cache-membuffer.c
index 8aeaf775d771..87ac96168b0f 100644
--- a/subversion/libsvn_subr/cache-membuffer.c
+++ b/subversion/libsvn_subr/cache-membuffer.c
@@ -139,6 +139,10 @@
 #endif
 
 /* For more efficient copy operations, let's align all data items properly.
+ * Since we can't portably align pointers, this is rather the item size
+ * granularity which ensures *relative* alignment within the cache - still
+ * giving us decent copy speeds on most machines.
+ *
  * Must be a power of 2.
  */
 #define ITEM_ALIGNMENT 16
@@ -632,10 +636,6 @@ struct svn_membuffer_t
  */
 #define ALIGN_VALUE(value) (((value) + ITEM_ALIGNMENT-1) & -ITEM_ALIGNMENT)
 
-/* Align POINTER value to the next ITEM_ALIGNMENT boundary.
- */
-#define ALIGN_POINTER(pointer) ((void*)ALIGN_VALUE((apr_size_t)(char*)(pointer)))
-
 /* If locking is supported for CACHE, acquire a read lock for it.
  */
 static svn_error_t *
@@ -1643,28 +1643,6 @@ ensure_data_insertable_l1(svn_membuffer_t *cache, apr_size_t size)
    * right answer. */
 }
 
-/* Mimic apr_pcalloc in APR_POOL_DEBUG mode, i.e. handle failed allocations
- * (e.g. OOM) properly: Allocate at least SIZE bytes from POOL and zero
- * the content of the allocated memory if ZERO has been set. Return NULL
- * upon failed allocations.
- *
- * Also, satisfy our buffer alignment needs for performance reasons.
- */
-static void* secure_aligned_alloc(apr_pool_t *pool,
-                                  apr_size_t size,
-                                  svn_boolean_t zero)
-{
-  void* memory = apr_palloc(pool, size + ITEM_ALIGNMENT);
-  if (memory != NULL)
-    {
-      memory = ALIGN_POINTER(memory);
-      if (zero)
-        memset(memory, 0, size);
-    }
-
-  return memory;
-}
-
 svn_error_t *
 svn_cache__membuffer_cache_create(svn_membuffer_t **cache,
                                   apr_size_t total_size,
@@ -1822,10 +1800,11 @@ svn_cache__membuffer_cache_create(svn_membuffer_t **cache,
       c[seg].l2.last = NO_INDEX;
       c[seg].l2.next = NO_INDEX;
       c[seg].l2.start_offset = c[seg].l1.size;
-      c[seg].l2.size = data_size - c[seg].l1.size;
+      c[seg].l2.size = ALIGN_VALUE(data_size) - c[seg].l1.size;
       c[seg].l2.current_data = c[seg].l2.start_offset;
 
-      c[seg].data = secure_aligned_alloc(pool, (apr_size_t)data_size, FALSE);
+      /* This cast is safe because DATA_SIZE <= MAX_SEGMENT_SIZE. */
+      c[seg].data = apr_palloc(pool, (apr_size_t)ALIGN_VALUE(data_size));
       c[seg].data_used = 0;
       c[seg].max_entry_size = max_entry_size;
 
@@ -2204,7 +2183,7 @@ membuffer_cache_get_internal(svn_membuffer_t *cache,
     }
 
   size = ALIGN_VALUE(entry->size) - entry->key.key_len;
-  *buffer = ALIGN_POINTER(apr_palloc(result_pool, size + ITEM_ALIGNMENT-1));
+  *buffer = apr_palloc(result_pool, size);
   memcpy(*buffer, cache->data + entry->offset + entry->key.key_len, size);
 
 #ifdef SVN_DEBUG_CACHE_MEMBUFFER
```
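The patch drops the `ALIGN_POINTER` / `secure_aligned_alloc` pair and instead rounds allocation *sizes* up with `ALIGN_VALUE`, so cached items keep their relative 16-byte alignment inside the data buffer regardless of where `apr_palloc` happens to place that buffer. As a rough illustration of what the macro does, here is a minimal standalone C sketch; only `ITEM_ALIGNMENT` and `ALIGN_VALUE` are taken from the patch, while the `main` driver and the sample sizes are made up for demonstration:

```c
#include <stdio.h>
#include <stddef.h>

/* From cache-membuffer.c: the alignment granularity; must be a power of 2. */
#define ITEM_ALIGNMENT 16

/* From cache-membuffer.c: round VALUE up to the next multiple of
 * ITEM_ALIGNMENT. Adding ITEM_ALIGNMENT-1 overshoots into the next aligned
 * block, and the & -ITEM_ALIGNMENT mask clears the low bits to land on its
 * start. */
#define ALIGN_VALUE(value) (((value) + ITEM_ALIGNMENT-1) & -ITEM_ALIGNMENT)

int main(void)
{
  /* Hypothetical item sizes, just to show the rounding behaviour. */
  size_t sizes[] = { 1, 15, 16, 17, 100 };
  size_t i;

  for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
    printf("%3zu -> %3zu\n", sizes[i], (size_t)ALIGN_VALUE(sizes[i]));

  return 0;   /* prints 16, 16, 16, 32 and 112 */
}
```

Because `ITEM_ALIGNMENT` is a power of two, this rounds any size up to the next multiple of 16 with a single add and mask, no division needed; allocating the rounded-up size is what lets the new code replace the old over-allocate-then-align-the-pointer approach.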
