diff options
author | Julian Elischer <julian@FreeBSD.org> | 2007-10-26 08:00:41 +0000 |
---|---|---|
committer | Julian Elischer <julian@FreeBSD.org> | 2007-10-26 08:00:41 +0000 |
commit | 7ab24ea3b9c35b2ff5e1e575111699e501ef203a (patch) | |
tree | e047e33390a9aae820357c54ff8602bc8faad475 /sys | |
parent | c47b138a96ddc99079eedf58c9d91465e64fceae (diff) | |
download | src-7ab24ea3b9c35b2ff5e1e575111699e501ef203a.tar.gz src-7ab24ea3b9c35b2ff5e1e575111699e501ef203a.zip |
Notes
Diffstat (limited to 'sys')
-rw-r--r-- | sys/kern/init_main.c | 5 | ||||
-rw-r--r-- | sys/kern/kern_exit.c | 7 | ||||
-rw-r--r-- | sys/kern/kern_idle.c | 16 | ||||
-rw-r--r-- | sys/kern/kern_intr.c | 38 | ||||
-rw-r--r-- | sys/kern/kern_kthread.c | 211 | ||||
-rw-r--r-- | sys/kern/kern_shutdown.c | 22 | ||||
-rw-r--r-- | sys/kern/kern_thread.c | 6 | ||||
-rw-r--r-- | sys/kern/sched_4bsd.c | 2 | ||||
-rw-r--r-- | sys/sys/kthread.h | 29 | ||||
-rw-r--r-- | sys/sys/proc.h | 5 |
10 files changed, 291 insertions, 50 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index ae18f12480b1..93abd69739e4 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -427,12 +427,13 @@ proc0_init(void *dummy __unused) td->td_priority = PVM; td->td_base_pri = PUSER; td->td_oncpu = 0; - td->td_flags = TDF_INMEM; + td->td_flags = TDF_INMEM|TDP_KTHREAD; p->p_peers = 0; p->p_leader = p; - bcopy("swapper", p->p_comm, sizeof ("swapper")); + strncpy(p->p_comm, "kernel", sizeof (p->p_comm)); + strncpy(td->td_name, "swapper", sizeof (td->td_name)); callout_init(&p->p_itcallout, CALLOUT_MPSAFE); callout_init_mtx(&p->p_limco, &p->p_mtx, 0); diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index 94b949b33ccf..af038a23ee92 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -136,8 +136,7 @@ exit1(struct thread *td, int rv) * MUST abort all other threads before proceeding past here. */ PROC_LOCK(p); - if (p->p_flag & P_HADTHREADS) { -retry: + while (p->p_flag & P_HADTHREADS) { /* * First check if some other thread got here before us.. * if so, act apropriatly, (exit or suspend); @@ -161,8 +160,8 @@ retry: * re-check all suspension request, the thread should * either be suspended there or exit. */ - if (thread_single(SINGLE_EXIT)) - goto retry; + if (! thread_single(SINGLE_EXIT)) + break; /* * All other activity in this process is now stopped. 
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c index 43ce37a2faee..c875e8592a43 100644 --- a/sys/kern/kern_idle.c +++ b/sys/kern/kern_idle.c @@ -60,27 +60,25 @@ idle_setup(void *dummy) #ifdef SMP SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { - error = kproc_create(sched_idletd, NULL, &p, - RFSTOPPED | RFHIGHPID, 0, "idle: cpu%d", pc->pc_cpuid); - pc->pc_idlethread = FIRST_THREAD_IN_PROC(p); +#endif + error = kproc_kthread_add(sched_idletd, NULL, &p, &td, + RFSTOPPED | RFHIGHPID, 0, "idled", "idle: cpu%d", pc->pc_cpuid); +#ifdef SMP + pc->pc_idlethread = td; #else - error = kproc_create(sched_idletd, NULL, &p, - RFSTOPPED | RFHIGHPID, 0, "idle"); - PCPU_SET(idlethread, FIRST_THREAD_IN_PROC(p)); + PCPU_SET(idlethread, td); #endif + p = td->td_proc; if (error) panic("idle_setup: kproc_create error %d\n", error); - PROC_LOCK(p); p->p_flag |= P_NOLOAD; - td = FIRST_THREAD_IN_PROC(p); thread_lock(td); TD_SET_CAN_RUN(td); td->td_flags |= TDF_IDLETD; sched_class(td, PRI_IDLE); sched_prio(td, PRI_MAX_IDLE); thread_unlock(td); - PROC_UNLOCK(p); #ifdef SMP } #endif diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c index ae749766561f..4d450f462351 100644 --- a/sys/kern/kern_intr.c +++ b/sys/kern/kern_intr.c @@ -80,6 +80,7 @@ struct intr_event *clk_intr_event; struct intr_event *tty_intr_event; void *softclock_ih; void *vm_ih; +struct proc *intrproc; static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); @@ -171,8 +172,7 @@ ithread_update(struct intr_thread *ithd) pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; /* Update name and priority. 
*/ - strlcpy(td->td_proc->p_comm, ie->ie_fullname, - sizeof(td->td_proc->p_comm)); + strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); thread_lock(td); sched_prio(td, pri); thread_unlock(td); @@ -332,16 +332,15 @@ ithread_create(const char *name) { struct intr_thread *ithd; struct thread *td; - struct proc *p; int error; ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); - error = kproc_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID, - 0, "%s", name); + error = kproc_kthread_add(ithread_loop, ithd, &intrproc, + &td, RFSTOPPED | RFHIGHPID, + 0, "interd", "%s", name); if (error) panic("kproc_create() failed with %d", error); - td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */ thread_lock(td); sched_class(td, PRI_ITHD); TD_SET_IWAIT(td); @@ -357,16 +356,15 @@ ithread_create(const char *name, struct intr_handler *ih) { struct intr_thread *ithd; struct thread *td; - struct proc *p; int error; ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); - error = kproc_create(ithread_loop, ih, &p, RFSTOPPED | RFHIGHPID, - 0, "%s", name); + error = kproc_kthread_add(ithread_loop, ih, &intrproc, + &td, RFSTOPPED | RFHIGHPID, + 0, "interd", "%s", name); if (error) panic("kproc_create() failed with %d", error); - td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */ thread_lock(td); sched_class(td, PRI_ITHD); TD_SET_IWAIT(td); @@ -688,7 +686,7 @@ intr_event_schedule_thread(struct intr_event *ie) */ if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, - p->p_pid, p->p_comm); + p->p_pid, td->td_name); entropy.event = (uintptr_t)ie; entropy.td = ctd; random_harvest(&entropy, sizeof(entropy), 2, 0, @@ -706,12 +704,12 @@ intr_event_schedule_thread(struct intr_event *ie) thread_lock(td); if (TD_AWAITING_INTR(td)) { CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, - p->p_comm); + td->td_name); TD_CLR_IWAIT(td); sched_add(td, SRQ_INTR); } else { CTR5(KTR_INTR, "%s: pid %d 
(%s): it_need %d, state %d", - __func__, p->p_pid, p->p_comm, it->it_need, td->td_state); + __func__, p->p_pid, td->td_name, it->it_need, td->td_state); } thread_unlock(td); @@ -842,7 +840,7 @@ intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) */ if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, - p->p_pid, p->p_comm); + p->p_pid, td->td_name); entropy.event = (uintptr_t)ie; entropy.td = ctd; random_harvest(&entropy, sizeof(entropy), 2, 0, @@ -860,12 +858,12 @@ intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) thread_lock(td); if (TD_AWAITING_INTR(td)) { CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, - p->p_comm); + td->td_name); TD_CLR_IWAIT(td); sched_add(td, SRQ_INTR); } else { CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", - __func__, p->p_pid, p->p_comm, it->it_need, td->td_state); + __func__, p->p_pid, td->td_name, it->it_need, td->td_state); } thread_unlock(td); @@ -1100,9 +1098,9 @@ ithread_loop(void *arg) */ if (ithd->it_flags & IT_DEAD) { CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, - p->p_pid, p->p_comm); + p->p_pid, td->td_name); free(ithd, M_ITHREAD); - kproc_exit(0); + kthread_exit(0); } /* @@ -1171,9 +1169,9 @@ ithread_loop(void *arg) */ if (ithd->it_flags & IT_DEAD) { CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, - p->p_pid, p->p_comm); + p->p_pid, td->td_name); free(ithd, M_ITHREAD); - kproc_exit(0); + kthread_exit(0); } /* diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c index a9359359dabd..aff0fe6a1c37 100644 --- a/sys/kern/kern_kthread.c +++ b/sys/kern/kern_kthread.c @@ -39,6 +39,8 @@ __FBSDID("$FreeBSD$"); #include <sys/unistd.h> #include <sys/wait.h> #include <sys/sched.h> +#include <vm/vm.h> +#include <vm/vm_extern.h> #include <machine/stdarg.h> @@ -95,7 +97,9 @@ kproc_create(void (*func)(void *), void *arg, /* this is a non-swapped system process */ PROC_LOCK(p2); + td = 
FIRST_THREAD_IN_PROC(p2); p2->p_flag |= P_SYSTEM | P_KTHREAD; + td->td_pflags |= TDP_KTHREAD; mtx_lock(&p2->p_sigacts->ps_mtx); p2->p_sigacts->ps_flag |= PS_NOCLDWAIT; mtx_unlock(&p2->p_sigacts->ps_mtx); @@ -105,9 +109,12 @@ kproc_create(void (*func)(void *), void *arg, va_start(ap, fmt); vsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap); va_end(ap); + /* set up arg0 for 'ps', et al */ + va_start(ap, fmt); + vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap); + va_end(ap); /* call the processes' main()... */ - td = FIRST_THREAD_IN_PROC(p2); cpu_set_fork_handler(td, func, arg); TD_SET_CAN_RUN(td); @@ -167,7 +174,7 @@ kproc_suspend(struct proc *p, int timo) } SIGADDSET(p->p_siglist, SIGSTOP); wakeup(p); - return msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkt", timo); + return msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkp", timo); } int @@ -194,7 +201,205 @@ kproc_suspend_check(struct proc *p) PROC_LOCK(p); while (SIGISMEMBER(p->p_siglist, SIGSTOP)) { wakeup(&p->p_siglist); - msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "ktsusp", 0); + msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "kpsusp", 0); } PROC_UNLOCK(p); } + + +/* + * Start a kernel thread. + * + * This function is used to start "internal" daemons and intended + * to be called from SYSINIT(). + */ + +void +kthread_start(udata) + const void *udata; +{ + const struct kthread_desc *kp = udata; + int error; + + error = kthread_add((void (*)(void *))kp->func, NULL, + NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0); + if (error) + panic("kthread_start: %s: error %d", kp->arg0, error); +} + +/* + * Create a kernel thread. It shares its address space + * with proc0 - ie: kernel only. + * + * func is the function to start. + * arg is the parameter to pass to function on first startup. + * newtdp is the return value pointing to the thread's struct thread. + * ** XXX fix this --> flags are flags to fork1 (in unistd.h) + * ** XXX are any used? 
+ * fmt and following will be *printf'd into (*newtd)->td_name (for ps, etc.). + */ +int +kthread_add(void (*func)(void *), void *arg, struct proc *p, + struct thread **newtdp, int flags, int pages, const char *fmt, ...) +{ + va_list ap; + struct thread *newtd, *oldtd; + int error; + + if (!proc0.p_stats) + panic("kthread_add called too soon"); + + error = 0; + if (p == NULL) { + p = &proc0; + oldtd = &thread0; + } else { + oldtd = FIRST_THREAD_IN_PROC(p); + } + + /* Initialize our td */ + newtd = thread_alloc(); + if (newtd == NULL) + return (ENOMEM); + + bzero(&newtd->td_startzero, + __rangeof(struct thread, td_startzero, td_endzero)); +/* XXX check if we should zero. */ + bcopy(&oldtd->td_startcopy, &newtd->td_startcopy, + __rangeof(struct thread, td_startcopy, td_endcopy)); + + /* set up arg0 for 'ps', et al */ + va_start(ap, fmt); + vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap); + va_end(ap); + + newtd->td_proc = p; /* needed for cpu_set_upcall */ + + /* XXX optimise this probably? */ + /* On x86 (and probably the others too) it is way too full of junk */ + /* Needs a better name */ + cpu_set_upcall(newtd, oldtd); + /* put the designated function(arg) as the resume context */ + cpu_set_fork_handler(newtd, func, arg); + + newtd->td_pflags |= TDP_KTHREAD; + newtd->td_ucred = crhold(p->p_ucred); + /* Allocate and switch to an alternate kstack if specified. */ + if (pages != 0) + vm_thread_new_altkstack(newtd, pages); + + /* this code almost the same as create_thread() in kern_thr.c */ + PROC_LOCK(p); + p->p_flag |= P_HADTHREADS; + newtd->td_sigmask = oldtd->td_sigmask; /* XXX dubious */ + PROC_SLOCK(p); + thread_link(newtd, p); + thread_lock(oldtd); + /* let the scheduler know about these things. */ + sched_fork_thread(oldtd, newtd); + TD_SET_CAN_RUN(newtd); + thread_unlock(oldtd); + PROC_SUNLOCK(p); + PROC_UNLOCK(p); + + + /* Delay putting it on the run queue until now. 
*/ + if (!(flags & RFSTOPPED)) { + thread_lock(newtd); + sched_add(newtd, SRQ_BORING); + thread_unlock(newtd); + } + if (newtdp) + *newtdp = newtd; + return 0; +} + +void +kthread_exit(int ecode) +{ + thread_exit(); +} + +/* + * Advise a kernel process to suspend (or resume) in its main loop. + * Participation is voluntary. + */ +int +kthread_suspend(struct thread *td, int timo) +{ + if ((td->td_pflags & TDP_KTHREAD) == 0) { + return (EINVAL); + } + thread_lock(td); + td->td_flags |= TDF_KTH_SUSP; + thread_unlock(td); + /* + * If it's stopped for some other reason, + * kick it to notice our request + * or we'll end up timing out + */ + wakeup(td); /* traditional place for kernel threads to sleep on */ /* XXX ?? */ + return (tsleep(&td->td_flags, PPAUSE | PDROP, "suspkt", timo)); +} + +/* + * let the kthread it can keep going again. + */ +int +kthread_resume(struct thread *td) +{ + if ((td->td_pflags & TDP_KTHREAD) == 0) { + return (EINVAL); + } + thread_lock(td); + td->td_flags &= ~TDF_KTH_SUSP; + thread_unlock(td); + wakeup(&td->td_name); + return (0); +} + +/* + * Used by the thread to poll as to whether it should yield/sleep + * and notify the caller that is has happened. + */ +void +kthread_suspend_check(struct thread *td) +{ + while (td->td_flags & TDF_KTH_SUSP) { + /* + * let the caller know we got the message then sleep + */ + wakeup(&td->td_flags); + tsleep(&td->td_name, PPAUSE, "ktsusp", 0); + } +} + +int +kproc_kthread_add(void (*func)(void *), void *arg, + struct proc **procptr, struct thread **tdptr, + int flags, int pages, char * procname, const char *fmt, ...) 
+{ + int error; + va_list ap; + char buf[100]; + struct thread *td; + + if (*procptr == 0) { + error = kproc_create(func, arg, + procptr, flags, pages, "%s", procname); + if (error) + return (error); + td = FIRST_THREAD_IN_PROC(*procptr); + *tdptr = td; + va_start(ap, fmt); + vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap); + va_end(ap); + return (0); + } + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + error = kthread_add(func, arg, *procptr, + tdptr, flags, pages, "%s", buf); + return (error); +} diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c index 14a74b7f219f..088e781e2335 100644 --- a/sys/kern/kern_shutdown.c +++ b/sys/kern/kern_shutdown.c @@ -616,6 +616,28 @@ kproc_shutdown(void *arg, int howto) printf("done\n"); } +void +kthread_shutdown(void *arg, int howto) +{ + struct thread *td; + char procname[MAXCOMLEN + 1]; + int error; + + if (panicstr) + return; + + td = (struct thread *)arg; + strlcpy(procname, td->td_name, sizeof(procname)); + printf("Waiting (max %d seconds) for system thread `%s' to stop...", + kproc_shutdown_wait, procname); + error = kthread_suspend(td, kproc_shutdown_wait * hz); + + if (error == EWOULDBLOCK) + printf("timed out\n"); + else + printf("done\n"); +} + /* Registration of dumpers */ int set_dumper(struct dumperinfo *di) diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 97c56a4f4595..2a28823015b4 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -657,12 +657,6 @@ thread_single(int mode) sleepq_abort(td2, EINTR); break; case SINGLE_BOUNDARY: - if (TD_IS_SUSPENDED(td2) && - !(td2->td_flags & TDF_BOUNDARY)) - thread_unsuspend_one(td2); - if (TD_ON_SLEEPQ(td2) && - (td2->td_flags & TDF_SINTR)) - sleepq_abort(td2, ERESTART); break; default: if (TD_IS_SUSPENDED(td2)) { diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c index 50f9aafcab13..058ee0db4453 100644 --- a/sys/kern/sched_4bsd.c +++ b/sys/kern/sched_4bsd.c @@ -1367,11 +1367,9 @@ 
sched_tick(void) void sched_idletd(void *dummy) { - struct proc *p; struct thread *td; td = curthread; - p = td->td_proc; for (;;) { mtx_assert(&Giant, MA_NOTOWNED); diff --git a/sys/sys/kthread.h b/sys/sys/kthread.h index 3bc83463c0ca..2bd4b33bb36a 100644 --- a/sys/sys/kthread.h +++ b/sys/sys/kthread.h @@ -42,13 +42,38 @@ struct kproc_desc { struct proc **global_procpp; /* ptr to proc ptr save area */ }; -void kproc_shutdown(void *, int); -void kproc_start(const void *); + /* A kernel thread descriptor; used to start "internal" daemons. */ +struct kthread_desc { + char *arg0; /* arg 0 (for 'ps' listing) */ + void (*func)(void); /* "main" for kernel thread */ + struct thread **global_threadpp; /* ptr to thread ptr save area */ +}; + int kproc_create(void (*)(void *), void *, struct proc **, int flags, int pages, const char *, ...) __printflike(6, 7); void kproc_exit(int) __dead2; int kproc_resume(struct proc *); +void kproc_shutdown(void *, int); +void kproc_start(const void *); int kproc_suspend(struct proc *, int); void kproc_suspend_check(struct proc *); +/* create a thread inthe given process. create the process if needed */ +int kproc_kthread_add(void (*)(void *), void *, + struct proc **, + struct thread **, + int flags, int pages, + char * procname, const char *, ...) __printflike(8, 9); + +int kthread_add(void (*)(void *), void *, + struct proc *, struct thread **, + int flags, int pages, const char *, ...) __printflike(7, 8); +void kthread_exit(int) __dead2; +int kthread_resume(struct thread *); +void kthread_shutdown(void *, int); +void kthread_start(const void *); +int kthread_suspend(struct thread *, int); +void kthread_suspend_check(struct thread *); + + #endif /* !_SYS_KTHREAD_H_ */ diff --git a/sys/sys/proc.h b/sys/sys/proc.h index c0e91eb7e3b8..6c8bac7d7219 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -322,7 +322,7 @@ do { \ #define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. 
*/ #define TDF_SELECT 0x00000040 /* Selecting; wakeup/waiting danger. */ #define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */ -#define TDF_UNUSEDx100 0x00000100 /* --available-- */ +#define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */ #define TDF_UBORROWING 0x00000200 /* Thread is borrowing user pri. */ #define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */ #define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */ @@ -348,7 +348,7 @@ do { \ /* * "Private" flags kept in td_pflags: - * These are only accessed by curthread and thus need no locking. + * These are only written by curthread and thus need no locking. */ #define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */ #define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */ @@ -371,6 +371,7 @@ do { \ #define TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */ #define TDP_WAKEUP 0x00080000 /* Don't sleep in umtx cond_wait */ #define TDP_INBDFLUSH 0x00100000 /* Already in BO_BDFLUSH, do not recurse */ +#define TDP_KTHREAD 0x00200000 /* This is an official kernel thread */ /* * Reasons that the current thread can not be run yet. |