aboutsummaryrefslogtreecommitdiff
path: root/sys/dev/hwpmc
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev/hwpmc')
-rw-r--r--sys/dev/hwpmc/hwpmc_arm64.c72
-rw-r--r--sys/dev/hwpmc/hwpmc_arm64.h2
-rw-r--r--sys/dev/hwpmc/hwpmc_core.c4
-rw-r--r--sys/dev/hwpmc/hwpmc_logging.c137
-rw-r--r--sys/dev/hwpmc/hwpmc_x86.c14
-rw-r--r--sys/dev/hwpmc/pmu_dmc620.c2
6 files changed, 137 insertions, 94 deletions
diff --git a/sys/dev/hwpmc/hwpmc_arm64.c b/sys/dev/hwpmc/hwpmc_arm64.c
index af8d25b098c4..310e43065716 100644
--- a/sys/dev/hwpmc/hwpmc_arm64.c
+++ b/sys/dev/hwpmc/hwpmc_arm64.c
@@ -34,10 +34,12 @@
#include <machine/pmc_mdep.h>
#include <machine/cpu.h>
+#include <machine/machdep.h>
#include "opt_acpi.h"
static int arm64_npmcs;
+static bool arm64_64bit_events __read_mostly = false;
struct arm64_event_code_map {
enum pmc_event pe_ev;
@@ -112,7 +114,7 @@ arm64_counter_disable(unsigned int pmc)
/*
* Performance Monitors Control Register
*/
-static uint32_t
+static uint64_t
arm64_pmcr_read(void)
{
uint32_t reg;
@@ -123,7 +125,7 @@ arm64_pmcr_read(void)
}
static void
-arm64_pmcr_write(uint32_t reg)
+arm64_pmcr_write(uint64_t reg)
{
WRITE_SPECIALREG(pmcr_el0, reg);
@@ -134,7 +136,7 @@ arm64_pmcr_write(uint32_t reg)
/*
* Performance Count Register N
*/
-static uint32_t
+static uint64_t
arm64_pmcn_read(unsigned int pmc)
{
@@ -148,7 +150,7 @@ arm64_pmcn_read(unsigned int pmc)
}
static void
-arm64_pmcn_write(unsigned int pmc, uint32_t reg)
+arm64_pmcn_write(unsigned int pmc, uint64_t reg)
{
KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));
@@ -163,7 +165,7 @@ static int
arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
{
- uint32_t config;
+ uint64_t config;
enum pmc_event pe;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
@@ -186,10 +188,18 @@ arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
switch (a->pm_caps & (PMC_CAP_SYSTEM | PMC_CAP_USER)) {
case PMC_CAP_SYSTEM:
+ /* Exclude EL0 */
config |= PMEVTYPER_U;
+ if (in_vhe()) {
+ /* If in VHE we need to include EL2 and exclude EL1 */
+ config |= PMEVTYPER_NSH | PMEVTYPER_P;
+ }
break;
case PMC_CAP_USER:
+ /* Exclude EL1 */
config |= PMEVTYPER_P;
+ /* Exclude EL2 */
+ config &= ~PMEVTYPER_NSH;
break;
default:
/*
@@ -197,11 +207,16 @@ arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
* (default setting) or if both flags are specified
* (user explicitly requested both qualifiers).
*/
+ if (in_vhe()) {
+ /* If in VHE we need to include EL2 */
+ config |= PMEVTYPER_NSH;
+ }
break;
}
pm->pm_md.pm_arm64.pm_arm64_evsel = config;
- PMCDBG2(MDP, ALL, 2, "arm64-allocate ri=%d -> config=0x%x", ri, config);
+ PMCDBG2(MDP, ALL, 2, "arm64-allocate ri=%d -> config=0x%lx", ri,
+ config);
return (0);
}
@@ -233,7 +248,15 @@ arm64_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
/* Reread counter in case we raced. */
tmp = arm64_pmcn_read(ri);
}
- tmp += 0x100000000llu * pm->pm_pcpu_state[cpu].pps_overflowcnt;
+ /*
+ * If the counter is 32-bit increment the upper bits of the counter.
+	 * If it is 64-bit then there is nothing we can do as tmp is already
+ * 64-bit.
+ */
+ if (!arm64_64bit_events) {
+ tmp &= 0xffffffffu;
+ tmp += (uint64_t)pm->pm_pcpu_state[cpu].pps_overflowcnt << 32;
+ }
intr_restore(s);
PMCDBG2(MDP, REA, 2, "arm64-read id=%d -> %jd", ri, tmp);
@@ -267,7 +290,10 @@ arm64_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
PMCDBG3(MDP, WRI, 1, "arm64-write cpu=%d ri=%d v=%jx", cpu, ri, v);
- pm->pm_pcpu_state[cpu].pps_overflowcnt = v >> 32;
+ if (!arm64_64bit_events) {
+ pm->pm_pcpu_state[cpu].pps_overflowcnt = v >> 32;
+ v &= 0xffffffffu;
+ }
arm64_pmcn_write(ri, v);
return (0);
@@ -299,7 +325,7 @@ arm64_config_pmc(int cpu, int ri, struct pmc *pm)
static int
arm64_start_pmc(int cpu, int ri, struct pmc *pm)
{
- uint32_t config;
+ uint64_t config;
config = pm->pm_md.pm_arm64.pm_arm64_evsel;
@@ -475,9 +501,10 @@ arm64_pcpu_init(struct pmc_mdep *md, int cpu)
WRITE_SPECIALREG(pmcntenclr_el0, 0xffffffff);
WRITE_SPECIALREG(pmintenclr_el1, 0xffffffff);
- /* Enable unit */
- pmcr = arm64_pmcr_read();
- pmcr |= PMCR_E;
+ /* Enable unit with a useful default state */
+ pmcr = PMCR_LC | PMCR_C | PMCR_P | PMCR_E;
+ if (arm64_64bit_events)
+ pmcr |= PMCR_LP;
arm64_pmcr_write(pmcr);
return (0);
@@ -486,7 +513,7 @@ arm64_pcpu_init(struct pmc_mdep *md, int cpu)
static int
arm64_pcpu_fini(struct pmc_mdep *md, int cpu)
{
- uint32_t pmcr;
+ uint64_t pmcr;
PMCDBG0(MDP, INI, 1, "arm64-pcpu-fini");
@@ -507,13 +534,14 @@ pmc_arm64_initialize(void)
struct pmc_mdep *pmc_mdep;
struct pmc_classdep *pcd;
int classes, idcode, impcode;
- int reg;
+ uint64_t dfr;
+ uint64_t pmcr;
uint64_t midr;
- reg = arm64_pmcr_read();
- arm64_npmcs = (reg & PMCR_N_MASK) >> PMCR_N_SHIFT;
- impcode = (reg & PMCR_IMP_MASK) >> PMCR_IMP_SHIFT;
- idcode = (reg & PMCR_IDCODE_MASK) >> PMCR_IDCODE_SHIFT;
+ pmcr = arm64_pmcr_read();
+ arm64_npmcs = (pmcr & PMCR_N_MASK) >> PMCR_N_SHIFT;
+ impcode = (pmcr & PMCR_IMP_MASK) >> PMCR_IMP_SHIFT;
+ idcode = (pmcr & PMCR_IDCODE_MASK) >> PMCR_IDCODE_SHIFT;
PMCDBG1(MDP, INI, 1, "arm64-init npmcs=%d", arm64_npmcs);
@@ -529,6 +557,12 @@ pmc_arm64_initialize(void)
midr &= ~(CPU_VAR_MASK | CPU_REV_MASK);
snprintf(pmc_cpuid, sizeof(pmc_cpuid), "0x%016lx", midr);
+ /* Check if we have 64-bit counters */
+ if (get_kernel_reg(ID_AA64DFR0_EL1, &dfr)) {
+ if (ID_AA64DFR0_PMUVer_VAL(dfr) >= ID_AA64DFR0_PMUVer_3_5)
+ arm64_64bit_events = true;
+ }
+
/*
* Allocate space for pointers to PMC HW descriptors and for
* the MDEP structure used by MI code.
@@ -576,7 +610,7 @@ pmc_arm64_initialize(void)
pcd->pcd_class = PMC_CLASS_ARMV8;
pcd->pcd_num = arm64_npmcs;
pcd->pcd_ri = pmc_mdep->pmd_npmc;
- pcd->pcd_width = 32;
+ pcd->pcd_width = 64;
pcd->pcd_allocate_pmc = arm64_allocate_pmc;
pcd->pcd_config_pmc = arm64_config_pmc;
diff --git a/sys/dev/hwpmc/hwpmc_arm64.h b/sys/dev/hwpmc/hwpmc_arm64.h
index e1f605a0371b..97909d8e7c92 100644
--- a/sys/dev/hwpmc/hwpmc_arm64.h
+++ b/sys/dev/hwpmc/hwpmc_arm64.h
@@ -42,7 +42,7 @@
#ifdef _KERNEL
/* MD extension for 'struct pmc' */
struct pmc_md_arm64_pmc {
- uint32_t pm_arm64_evsel;
+ uint64_t pm_arm64_evsel;
};
#endif /* _KERNEL */
#endif /* _DEV_HWPMC_ARMV8_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_core.c b/sys/dev/hwpmc/hwpmc_core.c
index bf224ded126f..83784b93718e 100644
--- a/sys/dev/hwpmc/hwpmc_core.c
+++ b/sys/dev/hwpmc/hwpmc_core.c
@@ -1051,7 +1051,7 @@ core_intr(struct trapframe *tf)
counter_u64_add(pmc_stats.pm_intr_ignored, 1);
if (found_interrupt)
- lapic_reenable_pmc();
+ lapic_reenable_pcint();
return (found_interrupt);
}
@@ -1150,7 +1150,7 @@ core2_intr(struct trapframe *tf)
counter_u64_add(pmc_stats.pm_intr_ignored, 1);
if (found_interrupt)
- lapic_reenable_pmc();
+ lapic_reenable_pcint();
/*
* Reenable all non-stalled PMCs.
diff --git a/sys/dev/hwpmc/hwpmc_logging.c b/sys/dev/hwpmc/hwpmc_logging.c
index 6394d7a9cdad..8fd7ef06a977 100644
--- a/sys/dev/hwpmc/hwpmc_logging.c
+++ b/sys/dev/hwpmc/hwpmc_logging.c
@@ -93,88 +93,93 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers_pcpu, CTLFLAG_RDTUN,
static struct mtx pmc_kthread_mtx; /* sleep lock */
-#define PMCLOG_INIT_BUFFER_DESCRIPTOR(D, buf, domain) do { \
- (D)->plb_fence = ((char *) (buf)) + 1024*pmclog_buffer_size; \
- (D)->plb_base = (D)->plb_ptr = ((char *) (buf)); \
- (D)->plb_domain = domain; \
- } while (0)
+#define PMCLOG_INIT_BUFFER_DESCRIPTOR(D, buf, domain) do { \
+ (D)->plb_fence = ((char *)(buf)) + 1024 * pmclog_buffer_size; \
+ (D)->plb_base = (D)->plb_ptr = ((char *)(buf)); \
+ (D)->plb_domain = domain; \
+} while (0)
-#define PMCLOG_RESET_BUFFER_DESCRIPTOR(D) do { \
- (D)->plb_ptr = (D)->plb_base; \
- } while (0)
+#define PMCLOG_RESET_BUFFER_DESCRIPTOR(D) do { \
+ (D)->plb_ptr = (D)->plb_base; \
+} while (0)
/*
* Log file record constructors.
*/
-#define _PMCLOG_TO_HEADER(T,L) \
+#define _PMCLOG_TO_HEADER(T, L) \
((PMCLOG_HEADER_MAGIC << 24) | (T << 16) | ((L) & 0xFFFF))
/* reserve LEN bytes of space and initialize the entry header */
-#define _PMCLOG_RESERVE_SAFE(PO,TYPE,LEN,ACTION, TSC) do { \
- uint32_t *_le; \
- int _len = roundup((LEN), sizeof(uint32_t)); \
- struct pmclog_header *ph; \
- if ((_le = pmclog_reserve((PO), _len)) == NULL) { \
- ACTION; \
- } \
- ph = (struct pmclog_header *)_le; \
- ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \
- ph->pl_tsc = (TSC); \
- _le += sizeof(*ph)/4 /* skip over timestamp */
+#define _PMCLOG_RESERVE_SAFE(PO, TYPE, LEN, ACTION, TSC) do { \
+ uint32_t *_le; \
+ int _len = roundup((LEN), sizeof(uint32_t)); \
+ struct pmclog_header *ph; \
+ \
+ if ((_le = pmclog_reserve((PO), _len)) == NULL) { \
+ ACTION; \
+ } \
+ ph = (struct pmclog_header *)_le; \
+ ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \
+ ph->pl_tsc = (TSC); \
+ _le += sizeof(*ph) / 4 /* skip over timestamp */
/* reserve LEN bytes of space and initialize the entry header */
-#define _PMCLOG_RESERVE(PO,TYPE,LEN,ACTION) do { \
- uint32_t *_le; \
- int _len = roundup((LEN), sizeof(uint32_t)); \
- uint64_t tsc; \
- struct pmclog_header *ph; \
- tsc = pmc_rdtsc(); \
- spinlock_enter(); \
- if ((_le = pmclog_reserve((PO), _len)) == NULL) { \
- spinlock_exit(); \
- ACTION; \
- } \
- ph = (struct pmclog_header *)_le; \
- ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \
- ph->pl_tsc = tsc; \
- _le += sizeof(*ph)/4 /* skip over timestamp */
-
-
-
-#define PMCLOG_RESERVE_SAFE(P,T,L,TSC) _PMCLOG_RESERVE_SAFE(P,T,L,return,TSC)
-#define PMCLOG_RESERVE(P,T,L) _PMCLOG_RESERVE(P,T,L,return)
-#define PMCLOG_RESERVE_WITH_ERROR(P,T,L) _PMCLOG_RESERVE(P,T,L, \
- error=ENOMEM;goto error)
+#define _PMCLOG_RESERVE(PO, TYPE, LEN, ACTION) do { \
+ uint32_t *_le; \
+ int _len = roundup((LEN), sizeof(uint32_t)); \
+ uint64_t tsc; \
+ struct pmclog_header *ph; \
+ \
+ tsc = pmc_rdtsc(); \
+ spinlock_enter(); \
+ if ((_le = pmclog_reserve((PO), _len)) == NULL) { \
+ spinlock_exit(); \
+ ACTION; \
+ } \
+ ph = (struct pmclog_header *)_le; \
+ ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \
+ ph->pl_tsc = tsc; \
+ _le += sizeof(*ph) / 4 /* skip over timestamp */
+
+#define PMCLOG_RESERVE_SAFE(P, T, L, TSC) \
+ _PMCLOG_RESERVE_SAFE(P, T, L, return, TSC)
+#define PMCLOG_RESERVE(P,T,L) \
+ _PMCLOG_RESERVE(P, T, L, return)
+#define PMCLOG_RESERVE_WITH_ERROR(P, T, L) \
+ _PMCLOG_RESERVE(P, T, L, error = ENOMEM; goto error)
#define PMCLOG_EMIT32(V) do { *_le++ = (V); } while (0)
#define PMCLOG_EMIT64(V) do { \
- *_le++ = (uint32_t) ((V) & 0xFFFFFFFF); \
- *_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF); \
- } while (0)
+ *_le++ = (uint32_t) ((V) & 0xFFFFFFFF); \
+ *_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF); \
+} while (0)
/* Emit a string. Caution: does NOT update _le, so needs to be last */
-#define PMCLOG_EMITSTRING(S,L) do { bcopy((S), _le, (L)); } while (0)
-#define PMCLOG_EMITNULLSTRING(L) do { bzero(_le, (L)); } while (0)
-
-#define PMCLOG_DESPATCH_SAFE(PO) \
- pmclog_release((PO)); \
- } while (0)
-
-#define PMCLOG_DESPATCH_SCHED_LOCK(PO) \
- pmclog_release_flags((PO), 0); \
- } while (0)
-
-#define PMCLOG_DESPATCH(PO) \
- pmclog_release((PO)); \
- spinlock_exit(); \
- } while (0)
-
-#define PMCLOG_DESPATCH_SYNC(PO) \
- pmclog_schedule_io((PO), 1); \
- spinlock_exit(); \
- } while (0)
-
+#define PMCLOG_EMITSTRING(S,L) do { \
+ bcopy((S), _le, (L)); \
+} while (0)
+#define PMCLOG_EMITNULLSTRING(L) do { \
+ bzero(_le, (L)); \
+} while (0)
+
+#define PMCLOG_DESPATCH_SAFE(PO) \
+ pmclog_release((PO)); \
+} while (0)
+
+#define PMCLOG_DESPATCH_SCHED_LOCK(PO) \
+ pmclog_release_flags((PO), 0); \
+} while (0)
+
+#define PMCLOG_DESPATCH(PO) \
+ pmclog_release((PO)); \
+ spinlock_exit(); \
+} while (0)
+
+#define PMCLOG_DESPATCH_SYNC(PO) \
+ pmclog_schedule_io((PO), 1); \
+ spinlock_exit(); \
+} while (0)
#define TSDELTA 4
/*
diff --git a/sys/dev/hwpmc/hwpmc_x86.c b/sys/dev/hwpmc/hwpmc_x86.c
index 1d04a6610674..2903c25ef5c9 100644
--- a/sys/dev/hwpmc/hwpmc_x86.c
+++ b/sys/dev/hwpmc/hwpmc_x86.c
@@ -230,7 +230,7 @@ struct pmc_mdep *
pmc_md_initialize(void)
{
int i;
- struct pmc_mdep *md;
+ struct pmc_mdep *md = NULL;
/* determine the CPU kind */
if (cpu_vendor_id == CPU_VENDOR_AMD ||
@@ -238,11 +238,13 @@ pmc_md_initialize(void)
md = pmc_amd_initialize();
else if (cpu_vendor_id == CPU_VENDOR_INTEL)
md = pmc_intel_initialize();
- else
+
+ if (md == NULL)
return (NULL);
+ nmi_register_handler(md->pmd_intr);
/* disallow sampling if we do not have an LAPIC */
- if (md != NULL && !lapic_enable_pmc())
+ if (!lapic_enable_pcint())
for (i = 0; i < md->pmd_nclass; i++) {
if (i == PMC_CLASS_INDEX_SOFT)
continue;
@@ -255,8 +257,10 @@ pmc_md_initialize(void)
void
pmc_md_finalize(struct pmc_mdep *md)
{
-
- lapic_disable_pmc();
+ if (md != NULL) {
+ lapic_disable_pcint();
+ nmi_remove_handler(md->pmd_intr);
+ }
if (cpu_vendor_id == CPU_VENDOR_AMD ||
cpu_vendor_id == CPU_VENDOR_HYGON)
pmc_amd_finalize(md);
diff --git a/sys/dev/hwpmc/pmu_dmc620.c b/sys/dev/hwpmc/pmu_dmc620.c
index 42e5dfdbf154..c33e5264f7de 100644
--- a/sys/dev/hwpmc/pmu_dmc620.c
+++ b/sys/dev/hwpmc/pmu_dmc620.c
@@ -68,7 +68,7 @@ struct pmu_dmc620_softc {
#define RD4(sc, r) bus_read_4((sc)->sc_res[0], (r))
#define WR4(sc, r, v) bus_write_4((sc)->sc_res[0], (r), (v))
-#define MD4(sc, r, c, s) WR4((sc), (r), RD4((sc), (r)) & ~(c) | (s))
+#define MD4(sc, r, c, s) WR4((sc), (r), (RD4((sc), (r)) & ~(c)) | (s))
#define CD2MD4(sc, u, r, c, s) MD4((sc), DMC620_CLKDIV2_REG((u), (r)), (c), (s))
#define CMD4(sc, u, r, c, s) MD4((sc), DMC620_CLK_REG((u), (r)), (c), (s))