summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  sys/vm/uma_core.c  | 4
-rw-r--r--  sys/vm/uma_dbg.c   | 6
-rw-r--r--  sys/vm/vm_object.c | 2
3 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 886415088bc0..443c1978e3f4 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1348,7 +1348,9 @@ zalloc_start:
("uma_zalloc: Bucket pointer mangled."));
cache->uc_allocs++;
#ifdef INVARIANTS
+ ZONE_LOCK(zone);
uma_dbg_alloc(zone, NULL, item);
+ ZONE_UNLOCK(zone);
#endif
CPU_UNLOCK(zone, cpu);
if (zone->uz_ctor)
@@ -1698,10 +1700,12 @@ zfree_start:
("uma_zfree: Freeing to non free bucket index."));
bucket->ub_bucket[bucket->ub_ptr] = item;
#ifdef INVARIANTS
+ ZONE_LOCK(zone);
if (zone->uz_flags & UMA_ZFLAG_MALLOC)
uma_dbg_free(zone, udata, item);
else
uma_dbg_free(zone, NULL, item);
+ ZONE_UNLOCK(zone);
#endif
CPU_UNLOCK(zone, cpu);
return;
diff --git a/sys/vm/uma_dbg.c b/sys/vm/uma_dbg.c
index a1009e95ee3c..ebdba40c95a8 100644
--- a/sys/vm/uma_dbg.c
+++ b/sys/vm/uma_dbg.c
@@ -199,9 +199,7 @@ uma_dbg_getslab(uma_zone_t zone, void *item)
if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
slab = vtoslab((vm_offset_t)mem);
} else if (zone->uz_flags & UMA_ZFLAG_HASH) {
- ZONE_LOCK(zone);
slab = hash_sfind(&zone->uz_hash, mem);
- ZONE_UNLOCK(zone);
} else {
mem += zone->uz_pgoff;
slab = (uma_slab_t)mem;
@@ -230,7 +228,7 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
freei = ((unsigned long)item - (unsigned long)slab->us_data)
/ zone->uz_rsize;
- atomic_set_8(&slab->us_freelist[freei], 255);
+ slab->us_freelist[freei] = 255;
return;
}
@@ -279,5 +277,5 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
* Until then the count of valid slabs will make sure we don't
* accidentally follow this and assume it's a valid index.
*/
- atomic_set_8(&slab->us_freelist[freei], 0);
+ slab->us_freelist[freei] = 0;
}
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 3a274422b3de..29b45b057ff7 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -225,7 +225,7 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
object->hash_rand = exp - 129;
} while (!atomic_cmpset_int(&object_hash_rand, exp, object->hash_rand));
- object->generation++; /* atomicity needed? XXX */
+ atomic_add_int(&object->generation, 1);
mtx_lock(&vm_object_list_mtx);
TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);