author    Peter Wemm <peter@FreeBSD.org>  2002-10-04 01:31:39 +0000
committer Peter Wemm <peter@FreeBSD.org>  2002-10-04 01:31:39 +0000
commit    c281972e6101f435cb7718c93b49ded29a6dd988 (patch)
tree      58029885f8d63a9fab08006bc07753c79446466c /sys/kern/kern_thread.c
parent    74cf93d79d85383e4c4cf884c5c20d8a1128dd97 (diff)
Diffstat (limited to 'sys/kern/kern_thread.c')
-rw-r--r--  sys/kern/kern_thread.c | 14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 7123616e235d..1b133a1d4027 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -216,9 +216,23 @@ void
 threadinit(void)
 {
 
+#ifndef __ia64__
 	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
 	    thread_ctor, thread_dtor, thread_init, thread_fini,
 	    UMA_ALIGN_CACHE, 0);
+#else
+	/*
+	 * XXX the ia64 kstack allocator is really lame and is at the mercy
+	 * of contigmalloc().  This hackery is to pre-construct a whole
+	 * pile of thread structures with associated kernel stacks early
+	 * in the system startup while contigmalloc() still works.  Once we
+	 * have them, keep them.  Sigh.
+	 */
+	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
+	    thread_ctor, thread_dtor, thread_init, thread_fini,
+	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
+	uma_prealloc(thread_zone, 512);		/* XXX arbitrary */
+#endif
 	ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp),
 	    NULL, NULL, NULL, NULL,
 	    UMA_ALIGN_CACHE, 0);
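
For readers unfamiliar with the pattern in the #else branch, it amounts to: create a UMA zone whose backing pages are never given back to the VM (UMA_ZONE_NOFREE) and pre-construct a fixed number of items while the early-boot contiguous allocator still works. Below is a minimal sketch of that pattern, not part of the commit: struct foo, fooinit() and the count of 512 are illustrative placeholders, while uma_zcreate(), uma_prealloc(), UMA_ALIGN_CACHE and UMA_ZONE_NOFREE are the same stock UMA(9) interfaces used in the diff.

/*
 * Sketch only: struct foo and fooinit() are hypothetical.  The UMA
 * calls mirror the ia64 branch of threadinit() above.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <vm/uma.h>

struct foo {
	int	f_state;		/* per-item payload (placeholder) */
};

static uma_zone_t foo_zone;

/* Called once, early in boot, e.g. from a SYSINIT(9) hook. */
static void
fooinit(void)
{
	/*
	 * UMA_ZONE_NOFREE: pages handed to this zone are never returned,
	 * so items constructed now remain usable for the lifetime of the
	 * system even after early-boot allocation stops working.
	 */
	foo_zone = uma_zcreate("FOO", sizeof(struct foo),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);

	/* Pre-construct a pile of items while allocation still works. */
	uma_prealloc(foo_zone, 512);
}

Later consumers simply call uma_zalloc(foo_zone, M_WAITOK) and receive one of the items populated by uma_prealloc(); because the zone never releases its slabs, those items are not lost once contigmalloc() can no longer satisfy large contiguous requests.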