summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--	sys/i386/i386/pmap.c	4
-rw-r--r--	sys/kern/sched_4bsd.c	24
-rw-r--r--	sys/kern/sched_ule.c	26
-rw-r--r--	sys/sys/proc.h	1
-rw-r--r--	sys/sys/sched.h	20
5 files changed, 20 insertions(+), 55 deletions(-)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index df2e1ca2c515..d7073b69e66f 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -807,7 +807,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t va)
if (pmap_is_current(pmap))
return (vtopte(va));
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- KASSERT(sched_ispinned(), ("curthread not pinned"));
+ KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
newpf = *pde & PG_FRAME;
if ((*PMAP1 & PG_FRAME) != newpf) {
*PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
@@ -1622,7 +1622,7 @@ pmap_remove_page(pmap_t pmap, vm_offset_t va)
pt_entry_t *pte;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- KASSERT(sched_ispinned(), ("curthread not pinned"));
+ KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
return;
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 66863397df30..c1a1c6b63617 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -87,7 +87,6 @@ struct kse {
} ke_state; /* (j) KSE status. */
int ke_cpticks; /* (j) Ticks of cpu time. */
struct runq *ke_runq; /* runq the kse is currently on */
- int ke_pinned; /* (k) nested count, pinned to a cpu */
};
#define ke_proc ke_thread->td_proc
@@ -125,7 +124,7 @@ struct kg_sched {
* cpus.
*/
#define KSE_CAN_MIGRATE(ke) \
- ((ke)->ke_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
+ ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
static struct kse kse0;
static struct kg_sched kg_sched0;
@@ -1172,26 +1171,5 @@ sched_pctcpu(struct thread *td)
return (0);
}
-
-void
-sched_pin(void)
-{
- curthread->td_sched->ke_pinned++;
-}
-
- void
-sched_unpin(void)
-{
- curthread->td_sched->ke_pinned--;
-}
-
-#ifdef INVARIANTS
-int
-sched_ispinned(void)
-{
- return (curthread->td_sched->ke_pinned);
-}
-#endif
-
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index fddc2b524ecb..bab2bdec65ea 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -112,7 +112,6 @@ struct kse {
KES_ONRUNQ
} ke_state; /* (j) thread sched specific status. */
int ke_slptime;
- int ke_pinned; /* (k) nested coult.. pinned to a cpu */
int ke_slice;
struct runq *ke_runq;
u_char ke_cpu; /* CPU that we have affinity for. */
@@ -344,10 +343,10 @@ static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
*/
#ifdef __i386__
#define KSE_CAN_MIGRATE(ke, class) \
- ((ke)->ke_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
+ ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#else /* !__i386__ */
#define KSE_CAN_MIGRATE(ke, class) \
- ((class) != PRI_ITHD && (ke)->ke_pinned == 0 && \
+ ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 && \
((ke)->ke_flags & KEF_BOUND) == 0)
#endif /* !__i386__ */
#endif
@@ -1902,26 +1901,5 @@ sched_sizeof_thread(void)
{
return (sizeof(struct thread) + sizeof(struct td_sched));
}
-
-void
-sched_pin(void)
-{
- curthread->td_sched->ke_pinned++;
-}
-
- void
-sched_unpin(void)
-{
- curthread->td_sched->ke_pinned--;
-}
-
-#ifdef INVARIANTS
-int
-sched_ispinned(void)
-{
- return (curthread->td_sched->ke_pinned);
-}
-#endif
-
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 1398bb227c03..57e180c95a07 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -268,6 +268,7 @@ struct thread {
LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */
struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
int td_intr_nesting_level; /* (k) Interrupt recursion. */
+ int td_pinned; /* (k) Temporary cpu pin count. */
struct kse_thr_mailbox *td_mailbox; /* (*) Userland mailbox address. */
struct ucred *td_ucred; /* (k) Reference to credentials. */
struct thread *td_standin; /* (*) Use this for an upcall. */
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 2cb4bc50e085..691239043e83 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -82,14 +82,10 @@ void sched_rem(struct thread *td);
* hold a thread on a particular CPU.
*/
void sched_bind(struct thread *td, int cpu);
+static __inline void sched_pin(void);
void sched_unbind(struct thread *td);
+static __inline void sched_unpin(void);
-/* these only work for curthread */
-void sched_pin(void);
-void sched_unpin(void);
-#ifdef INVARIANTS
-int sched_ispinned(void);
-#endif
/*
* These procedures tell the process data structure allocation code how
@@ -99,6 +95,18 @@ int sched_sizeof_ksegrp(void);
int sched_sizeof_proc(void);
int sched_sizeof_thread(void);
+static __inline void
+sched_pin(void)
+{
+ curthread->td_pinned++;
+}
+
+static __inline void
+sched_unpin(void)
+{
+ curthread->td_pinned--;
+}
+
/* temporarily here */
void schedinit(void);
void sched_destroyproc(struct proc *p);