summaryrefslogtreecommitdiff
path: root/lib/isc/stats.c
diff options
context:
space:
mode:
Diffstat (limited to 'lib/isc/stats.c')
-rw-r--r--lib/isc/stats.c78
1 files changed, 56 insertions, 22 deletions
diff --git a/lib/isc/stats.c b/lib/isc/stats.c
index 03d2a8a7d103..368dadfc323e 100644
--- a/lib/isc/stats.c
+++ b/lib/isc/stats.c
@@ -35,13 +35,45 @@
#define ISC_STATS_MAGIC ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x) ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)
-#ifndef ISC_STATS_USEMULTIFIELDS
-#if defined(ISC_RWLOCK_USEATOMIC) && defined(ISC_PLATFORM_HAVEXADD) && !defined(ISC_PLATFORM_HAVEXADDQ)
+/*%
+ * Local macro confirming presence of 64-bit
+ * increment and store operations, just to make
+ * the later macros simpler
+ */
+#if defined(ISC_PLATFORM_HAVEXADDQ) && defined(ISC_PLATFORM_HAVEATOMICSTOREQ)
+#define ISC_STATS_HAVEATOMICQ 1
+#else
+#define ISC_STATS_HAVEATOMICQ 0
+#endif
+
+/*%
+ * Only lock the counters if 64-bit atomic operations are
+ * not available but cheap atomic lock operations are.
+ * On a modern 64-bit system this should never be the case.
+ *
+ * Normal locks are too expensive to be used whenever a counter
+ * is updated.
+ */
+#if !ISC_STATS_HAVEATOMICQ && defined(ISC_RWLOCK_HAVEATOMIC)
+#define ISC_STATS_LOCKCOUNTERS 1
+#else
+#define ISC_STATS_LOCKCOUNTERS 0
+#endif
+
+/*%
+ * If 64-bit atomic operations are not available but
+ * 32-bit operations are then split the counter into two,
+ * using the atomic operations to try to ensure that any carry
+ * from the low word is correctly carried into the high word.
+ *
+ * Otherwise, just rely on standard 64-bit data types
+ * and operations
+ */
+#if !ISC_STATS_HAVEATOMICQ && defined(ISC_PLATFORM_HAVEXADD)
#define ISC_STATS_USEMULTIFIELDS 1
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
-#endif /* ISC_STATS_USEMULTIFIELDS */
#if ISC_STATS_USEMULTIFIELDS
typedef struct {
@@ -65,7 +97,7 @@ struct isc_stats {
* Locked by counterlock or unlocked if efficient rwlock is not
* available.
*/
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_t counterlock;
#endif
isc_stat_t *counters;
@@ -111,7 +143,7 @@ create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
goto clean_counters;
}
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
result = isc_rwlock_init(&stats->counterlock, 0, 0);
if (result != ISC_R_SUCCESS)
goto clean_copiedcounters;
@@ -131,7 +163,7 @@ create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
clean_counters:
isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
clean_copiedcounters:
isc_mem_put(mctx, stats->copiedcounters,
sizeof(isc_stat_t) * ncounters);
@@ -177,7 +209,7 @@ isc_stats_detach(isc_stats_t **statsp) {
sizeof(isc_stat_t) * stats->ncounters);
UNLOCK(&stats->lock);
DESTROYLOCK(&stats->lock);
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_destroy(&stats->counterlock);
#endif
isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
@@ -198,7 +230,7 @@ static inline void
incrementcounter(isc_stats_t *stats, int counter) {
isc_int32_t prev;
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
/*
* We use a "read" lock to prevent other threads from reading the
* counter while we "writing" a counter field. The write access itself
@@ -219,7 +251,7 @@ incrementcounter(isc_stats_t *stats, int counter) {
*/
if (prev == (isc_int32_t)0xffffffff)
isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi, 1);
-#elif defined(ISC_PLATFORM_HAVEXADDQ)
+#elif ISC_STATS_HAVEATOMICQ
UNUSED(prev);
isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#else
@@ -227,7 +259,7 @@ incrementcounter(isc_stats_t *stats, int counter) {
stats->counters[counter]++;
#endif
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
@@ -236,7 +268,7 @@ static inline void
decrementcounter(isc_stats_t *stats, int counter) {
isc_int32_t prev;
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif
@@ -245,7 +277,7 @@ decrementcounter(isc_stats_t *stats, int counter) {
if (prev == 0)
isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
-1);
-#elif defined(ISC_PLATFORM_HAVEXADDQ)
+#elif ISC_STATS_HAVEATOMICQ
UNUSED(prev);
isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
#else
@@ -253,7 +285,7 @@ decrementcounter(isc_stats_t *stats, int counter) {
stats->counters[counter]--;
#endif
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
@@ -262,7 +294,7 @@ static void
copy_counters(isc_stats_t *stats) {
int i;
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
/*
* We use a "write" lock before "reading" the statistics counters as
* an exclusive lock.
@@ -270,19 +302,21 @@ copy_counters(isc_stats_t *stats) {
isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif
-#if ISC_STATS_USEMULTIFIELDS
for (i = 0; i < stats->ncounters; i++) {
+#if ISC_STATS_USEMULTIFIELDS
stats->copiedcounters[i] =
- (isc_uint64_t)(stats->counters[i].hi) << 32 |
- stats->counters[i].lo;
- }
+ (isc_uint64_t)(stats->counters[i].hi) << 32 |
+ stats->counters[i].lo;
+#elif ISC_STATS_HAVEATOMICQ
+ /* use xaddq(..., 0) as an atomic load */
+ stats->copiedcounters[i] =
+ (isc_uint64_t)isc_atomic_xaddq((isc_int64_t *)&stats->counters[i], 0);
#else
- UNUSED(i);
- memmove(stats->copiedcounters, stats->counters,
- stats->ncounters * sizeof(isc_stat_t));
+ stats->copiedcounters[i] = stats->counters[i];
#endif
+ }
-#ifdef ISC_RWLOCK_USEATOMIC
+#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}