-rw-r--r--  sys/alpha/alpha/vm_machdep.c      101
-rw-r--r--  sys/amd64/amd64/vm_machdep.c       95
-rw-r--r--  sys/i386/i386/vm_machdep.c        101
-rw-r--r--  sys/ia64/ia64/vm_machdep.c        101
-rw-r--r--  sys/kern/uipc_syscalls.c           99
-rw-r--r--  sys/powerpc/aim/vm_machdep.c      101
-rw-r--r--  sys/powerpc/powerpc/vm_machdep.c  101
-rw-r--r--  sys/sparc64/sparc64/vm_machdep.c  104
8 files changed, 704 insertions, 99 deletions
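An sf_buf lends one page of kernel virtual address space to a wired vm_page so that sendfile(2) can hand file pages to the socket layer without copying. A minimal consumer sketch of the life cycle follows; the function and the destination buffer are hypothetical, and only the sf_buf calls come from this commit:

	/*
	 * Hypothetical consumer (not part of this commit): borrow a
	 * mapping for a wired page, use it, then return the sf_buf.
	 */
	static int
	copy_wired_page(struct vm_page *pg, char *dst)
	{
		struct sf_buf *sf;

		sf = sf_buf_alloc(pg);	/* may sleep; NULL on signal */
		if (sf == NULL)
			return (EINTR);
		bcopy((void *)sf->kva, dst, PAGE_SIZE); /* kva maps pg */
		sf_buf_free((void *)sf->kva, sf); /* unmap, unwire, wake */
		return (0);
	}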
diff --git a/sys/alpha/alpha/vm_machdep.c b/sys/alpha/alpha/vm_machdep.c
index 9139dc7d694b..05b9728b4b79 100644
--- a/sys/alpha/alpha/vm_machdep.c
+++ b/sys/alpha/alpha/vm_machdep.c
@@ -82,6 +82,8 @@ __FBSDID("$FreeBSD$");
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
@@ -101,6 +103,20 @@ __FBSDID("$FreeBSD$");
#include <sys/user.h>
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
+
+/*
+ * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, bundling
+ * the sf_freelist head with the sf_lock mutex.
+ */
+static struct {
+ SLIST_HEAD(, sf_buf) sf_head;
+ struct mtx sf_lock;
+} sf_freelist;
+
+static u_int sf_buf_alloc_want;
+
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the pcb, set up the stack so that the child
@@ -370,6 +386,91 @@ cpu_reset()
}
/*
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
+static void
+sf_buf_init(void *arg)
+{
+ struct sf_buf *sf_bufs;
+ vm_offset_t sf_base;
+ int i;
+
+ mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INIT(&sf_freelist.sf_head);
+ sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
+ M_NOWAIT | M_ZERO);
+ for (i = 0; i < nsfbufs; i++) {
+ sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
+ }
+ sf_buf_alloc_want = 0;
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
+ * Get an sf_buf from the freelist. Will block if none are available.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m)
+{
+ struct sf_buf *sf;
+ int error;
+
+ mtx_lock(&sf_freelist.sf_lock);
+ while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
+ sf_buf_alloc_want++;
+ error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
+ "sfbufa", 0);
+ sf_buf_alloc_want--;
+
+ /*
+ * If we got a signal, don't risk going back to sleep.
+ */
+ if (error)
+ break;
+ }
+ if (sf != NULL) {
+ SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
+ sf->m = m;
+ pmap_qenter(sf->kva, &sf->m, 1);
+ }
+ mtx_unlock(&sf_freelist.sf_lock);
+ return (sf);
+}
+
+/*
+ * Detach the mapped page and release resources back to the system.
+ */
+void
+sf_buf_free(void *addr, void *args)
+{
+ struct sf_buf *sf;
+ struct vm_page *m;
+
+ sf = args;
+ pmap_qremove((vm_offset_t)addr, 1);
+ m = sf->m;
+ vm_page_lock_queues();
+ vm_page_unwire(m, 0);
+ /*
+ * Check for the object going away on us. This can
+ * happen since we don't hold a reference to it.
+ * If so, we're responsible for freeing the page.
+ */
+ if (m->wire_count == 0 && m->object == NULL)
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ sf->m = NULL;
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
+ if (sf_buf_alloc_want > 0)
+ wakeup_one(&sf_freelist);
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
* Software interrupt handler for queued VM system processing.
*/
void
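One property of sf_buf_init() worth calling out: it runs at SI_SUB_MBUF time and uses the malloc() result without a check, relying on M_NOWAIT succeeding during early boot. A defensive variant would look like the sketch below; this is an illustration, not what the commit does:

	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	if (sf_bufs == NULL)	/* hypothetical check, absent in the commit */
		panic("sf_buf_init: cannot allocate %d sf_bufs", nsfbufs);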
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index aab07a8f18af..d08bb2bd75e1 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -57,7 +57,9 @@ __FBSDID("$FreeBSD$");
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
+#include <sys/mbuf.h>
#include <sys/mutex.h>
+#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
@@ -78,6 +80,19 @@ __FBSDID("$FreeBSD$");
#include <amd64/isa/isa.h>
static void cpu_reset_real(void);
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
+
+/*
+ * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, bundling
+ * the sf_freelist head with the sf_lock mutex.
+ */
+static struct {
+ SLIST_HEAD(, sf_buf) sf_head;
+ struct mtx sf_lock;
+} sf_freelist;
+
+static u_int sf_buf_alloc_want;
/*
* Finish a fork operation, with process p2 nearly set up.
@@ -349,6 +364,86 @@ cpu_reset_real()
}
/*
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
+static void
+sf_buf_init(void *arg)
+{
+ struct sf_buf *sf_bufs;
+ int i;
+
+ mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INIT(&sf_freelist.sf_head);
+ sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
+ M_NOWAIT | M_ZERO);
+ for (i = 0; i < nsfbufs; i++)
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
+ sf_buf_alloc_want = 0;
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
+ * Get an sf_buf from the freelist. Will block if none are available.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m)
+{
+ struct sf_buf *sf;
+ int error;
+
+ mtx_lock(&sf_freelist.sf_lock);
+ while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
+ sf_buf_alloc_want++;
+ error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
+ "sfbufa", 0);
+ sf_buf_alloc_want--;
+
+ /*
+ * If we got a signal, don't risk going back to sleep.
+ */
+ if (error)
+ break;
+ }
+ if (sf != NULL) {
+ SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
+ sf->m = m;
+ sf->kva = PHYS_TO_DMAP(m->phys_addr);
+ }
+ mtx_unlock(&sf_freelist.sf_lock);
+ return (sf);
+}
+
+/*
+ * Detach the mapped page and release resources back to the system.
+ */
+void
+sf_buf_free(void *addr, void *args)
+{
+ struct sf_buf *sf;
+ struct vm_page *m;
+
+ sf = args;
+ m = sf->m;
+ vm_page_lock_queues();
+ vm_page_unwire(m, 0);
+ /*
+ * Check for the object going away on us. This can
+ * happen since we don't hold a reference to it.
+ * If so, we're responsible for freeing the page.
+ */
+ if (m->wire_count == 0 && m->object == NULL)
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ sf->m = NULL;
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
+ if (sf_buf_alloc_want > 0)
+ wakeup_one(&sf_freelist);
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
* Software interrupt handler for queued VM system processing.
*/
void
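The amd64 copy is the one real variant in this commit: the kernel's direct map already provides a permanent mapping of all physical memory, so sf_buf_alloc() just records the direct-map address and sf_buf_free() needs no pmap_qremove(). The contrast with the other architectures, side by side:

	/* amd64: no reserved KVA window, no mapping traffic, no TLB work. */
	sf->kva = PHYS_TO_DMAP(m->phys_addr);	/* address is already valid */

	/* alpha, i386, ia64, powerpc, sparc64: install a transient mapping */
	pmap_qenter(sf->kva, &sf->m, 1);	/* ...and pmap_qremove() later */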
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 7cec62039f64..19114e1b6bd7 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -62,8 +62,10 @@ __FBSDID("$FreeBSD$");
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
+#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/smp.h>
+#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
@@ -95,6 +97,20 @@ static void cpu_reset_proxy(void);
static u_int cpu_reset_proxyid;
static volatile u_int cpu_reset_proxy_active;
#endif
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
+
+/*
+ * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, bundling
+ * the sf_freelist head with the sf_lock mutex.
+ */
+static struct {
+ SLIST_HEAD(, sf_buf) sf_head;
+ struct mtx sf_lock;
+} sf_freelist;
+
+static u_int sf_buf_alloc_want;
+
extern int _ucodesel, _udatasel;
/*
@@ -548,6 +564,91 @@ cpu_reset_real()
}
/*
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
+static void
+sf_buf_init(void *arg)
+{
+ struct sf_buf *sf_bufs;
+ vm_offset_t sf_base;
+ int i;
+
+ mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INIT(&sf_freelist.sf_head);
+ sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
+ M_NOWAIT | M_ZERO);
+ for (i = 0; i < nsfbufs; i++) {
+ sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
+ }
+ sf_buf_alloc_want = 0;
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
+ * Get an sf_buf from the freelist. Will block if none are available.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m)
+{
+ struct sf_buf *sf;
+ int error;
+
+ mtx_lock(&sf_freelist.sf_lock);
+ while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
+ sf_buf_alloc_want++;
+ error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
+ "sfbufa", 0);
+ sf_buf_alloc_want--;
+
+ /*
+ * If we got a signal, don't risk going back to sleep.
+ */
+ if (error)
+ break;
+ }
+ if (sf != NULL) {
+ SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
+ sf->m = m;
+ pmap_qenter(sf->kva, &sf->m, 1);
+ }
+ mtx_unlock(&sf_freelist.sf_lock);
+ return (sf);
+}
+
+/*
+ * Detach the mapped page and release resources back to the system.
+ */
+void
+sf_buf_free(void *addr, void *args)
+{
+ struct sf_buf *sf;
+ struct vm_page *m;
+
+ sf = args;
+ pmap_qremove((vm_offset_t)addr, 1);
+ m = sf->m;
+ vm_page_lock_queues();
+ vm_page_unwire(m, 0);
+ /*
+ * Check for the object going away on us. This can
+ * happen since we don't hold a reference to it.
+ * If so, we're responsible for freeing the page.
+ */
+ if (m->wire_count == 0 && m->object == NULL)
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ sf->m = NULL;
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
+ if (sf_buf_alloc_want > 0)
+ wakeup_one(&sf_freelist);
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
* Software interrupt handler for queued VM system processing.
*/
void
diff --git a/sys/ia64/ia64/vm_machdep.c b/sys/ia64/ia64/vm_machdep.c
index ae8ff3b32cad..5d771ed50569 100644
--- a/sys/ia64/ia64/vm_machdep.c
+++ b/sys/ia64/ia64/vm_machdep.c
@@ -78,6 +78,8 @@
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
@@ -98,6 +100,20 @@
#include <i386/include/psl.h>
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
+
+/*
+ * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, bundling
+ * the sf_freelist head with the sf_lock mutex.
+ */
+static struct {
+ SLIST_HEAD(, sf_buf) sf_head;
+ struct mtx sf_lock;
+} sf_freelist;
+
+static u_int sf_buf_alloc_want;
+
void
cpu_thread_exit(struct thread *td)
{
@@ -307,6 +323,91 @@ cpu_sched_exit(td)
}
/*
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
+static void
+sf_buf_init(void *arg)
+{
+ struct sf_buf *sf_bufs;
+ vm_offset_t sf_base;
+ int i;
+
+ mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INIT(&sf_freelist.sf_head);
+ sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
+ M_NOWAIT | M_ZERO);
+ for (i = 0; i < nsfbufs; i++) {
+ sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
+ }
+ sf_buf_alloc_want = 0;
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
+ * Get an sf_buf from the freelist. Will block if none are available.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m)
+{
+ struct sf_buf *sf;
+ int error;
+
+ mtx_lock(&sf_freelist.sf_lock);
+ while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
+ sf_buf_alloc_want++;
+ error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
+ "sfbufa", 0);
+ sf_buf_alloc_want--;
+
+ /*
+ * If we got a signal, don't risk going back to sleep.
+ */
+ if (error)
+ break;
+ }
+ if (sf != NULL) {
+ SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
+ sf->m = m;
+ pmap_qenter(sf->kva, &sf->m, 1);
+ }
+ mtx_unlock(&sf_freelist.sf_lock);
+ return (sf);
+}
+
+/*
+ * Detach the mapped page and release resources back to the system.
+ */
+void
+sf_buf_free(void *addr, void *args)
+{
+ struct sf_buf *sf;
+ struct vm_page *m;
+
+ sf = args;
+ pmap_qremove((vm_offset_t)addr, 1);
+ m = sf->m;
+ vm_page_lock_queues();
+ vm_page_unwire(m, 0);
+ /*
+ * Check for the object going away on us. This can
+ * happen since we don't hold a reference to it.
+ * If so, we're responsible for freeing the page.
+ */
+ if (m->wire_count == 0 && m->object == NULL)
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ sf->m = NULL;
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
+ if (sf_buf_alloc_want > 0)
+ wakeup_one(&sf_freelist);
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
* Software interrupt handler for queued VM system processing.
*/
void
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index c9dff02f5041..2d08b7e32132 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -77,9 +77,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
-static void sf_buf_init(void *arg);
-SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
-
static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
@@ -91,17 +88,6 @@ static int getpeername1(struct thread *td, struct getpeername_args *uap,
int compat);
/*
- * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, with the
- * sf_freelist head with the sf_lock mutex.
- */
-static struct {
- SLIST_HEAD(, sf_buf) sf_head;
- struct mtx sf_lock;
-} sf_freelist;
-
-static u_int sf_buf_alloc_want;
-
-/*
* System call interface to the socket abstraction.
*/
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
@@ -1632,91 +1618,6 @@ getsockaddr(namp, uaddr, len)
}
/*
- * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
- */
-static void
-sf_buf_init(void *arg)
-{
- struct sf_buf *sf_bufs;
- vm_offset_t sf_base;
- int i;
-
- mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
- mtx_lock(&sf_freelist.sf_lock);
- SLIST_INIT(&sf_freelist.sf_head);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
- sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
- M_NOWAIT | M_ZERO);
- for (i = 0; i < nsfbufs; i++) {
- sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
- SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
- }
- sf_buf_alloc_want = 0;
- mtx_unlock(&sf_freelist.sf_lock);
-}
-
-/*
- * Get an sf_buf from the freelist. Will block if none are available.
- */
-struct sf_buf *
-sf_buf_alloc(struct vm_page *m)
-{
- struct sf_buf *sf;
- int error;
-
- mtx_lock(&sf_freelist.sf_lock);
- while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
- sf_buf_alloc_want++;
- error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
- "sfbufa", 0);
- sf_buf_alloc_want--;
-
- /*
- * If we got a signal, don't risk going back to sleep.
- */
- if (error)
- break;
- }
- if (sf != NULL) {
- SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
- sf->m = m;
- pmap_qenter(sf->kva, &sf->m, 1);
- }
- mtx_unlock(&sf_freelist.sf_lock);
- return (sf);
-}
-
-/*
- * Detatch mapped page and release resources back to the system.
- */
-void
-sf_buf_free(void *addr, void *args)
-{
- struct sf_buf *sf;
- struct vm_page *m;
-
- sf = args;
- pmap_qremove((vm_offset_t)addr, 1);
- m = sf->m;
- vm_page_lock_queues();
- vm_page_unwire(m, 0);
- /*
- * Check for the object going away on us. This can
- * happen since we don't hold a reference to it.
- * If so, we're responsible for freeing the page.
- */
- if (m->wire_count == 0 && m->object == NULL)
- vm_page_free(m);
- vm_page_unlock_queues();
- sf->m = NULL;
- mtx_lock(&sf_freelist.sf_lock);
- SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
- if (sf_buf_alloc_want > 0)
- wakeup_one(&sf_freelist);
- mtx_unlock(&sf_freelist.sf_lock);
-}
-
-/*
* sendfile(2)
*
* MPSAFE
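The (addr, args) signature of sf_buf_free() matches the mbuf external-buffer free routine: sendfile(2) attaches sf->kva to an mbuf and lets the socket layer call sf_buf_free() once the data has been transmitted. Roughly as below; this is a sketch from memory of the sendfile code of this era, not a quotation:

	/*
	 * Sketch: the mbuf borrows the sf_buf's mapping as external
	 * storage; sf_buf_free() runs when the mbuf is finally freed.
	 */
	MEXTADD(m, (caddr_t)sf->kva, PAGE_SIZE, sf_buf_free, sf,
	    M_RDONLY, EXT_SFBUF);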
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index 3f83f8843dc2..cef8327d7ad6 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -81,6 +81,8 @@
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
@@ -101,6 +103,20 @@
#include <sys/user.h>
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
+
+/*
+ * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, bundling
+ * the sf_freelist head with the sf_lock mutex.
+ */
+static struct {
+ SLIST_HEAD(, sf_buf) sf_head;
+ struct mtx sf_lock;
+} sf_freelist;
+
+static u_int sf_buf_alloc_want;
+
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the pcb, set up the stack so that the child
@@ -220,6 +236,91 @@ cpu_reset()
}
/*
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
+static void
+sf_buf_init(void *arg)
+{
+ struct sf_buf *sf_bufs;
+ vm_offset_t sf_base;
+ int i;
+
+ mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INIT(&sf_freelist.sf_head);
+ sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
+ M_NOWAIT | M_ZERO);
+ for (i = 0; i < nsfbufs; i++) {
+ sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
+ }
+ sf_buf_alloc_want = 0;
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
+ * Get an sf_buf from the freelist. Will block if none are available.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m)
+{
+ struct sf_buf *sf;
+ int error;
+
+ mtx_lock(&sf_freelist.sf_lock);
+ while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
+ sf_buf_alloc_want++;
+ error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
+ "sfbufa", 0);
+ sf_buf_alloc_want--;
+
+ /*
+ * If we got a signal, don't risk going back to sleep.
+ */
+ if (error)
+ break;
+ }
+ if (sf != NULL) {
+ SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
+ sf->m = m;
+ pmap_qenter(sf->kva, &sf->m, 1);
+ }
+ mtx_unlock(&sf_freelist.sf_lock);
+ return (sf);
+}
+
+/*
+ * Detach the mapped page and release resources back to the system.
+ */
+void
+sf_buf_free(void *addr, void *args)
+{
+ struct sf_buf *sf;
+ struct vm_page *m;
+
+ sf = args;
+ pmap_qremove((vm_offset_t)addr, 1);
+ m = sf->m;
+ vm_page_lock_queues();
+ vm_page_unwire(m, 0);
+ /*
+ * Check for the object going away on us. This can
+ * happen since we don't hold a reference to it.
+ * If so, we're responsible for freeing the page.
+ */
+ if (m->wire_count == 0 && m->object == NULL)
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ sf->m = NULL;
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
+ if (sf_buf_alloc_want > 0)
+ wakeup_one(&sf_freelist);
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
* Software interrupt handler for queued VM system processing.
*/
void
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index 3f83f8843dc2..cef8327d7ad6 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -81,6 +81,8 @@
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
@@ -101,6 +103,20 @@
#include <sys/user.h>
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
+
+/*
+ * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, bundling
+ * the sf_freelist head with the sf_lock mutex.
+ */
+static struct {
+ SLIST_HEAD(, sf_buf) sf_head;
+ struct mtx sf_lock;
+} sf_freelist;
+
+static u_int sf_buf_alloc_want;
+
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the pcb, set up the stack so that the child
@@ -220,6 +236,91 @@ cpu_reset()
}
/*
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
+static void
+sf_buf_init(void *arg)
+{
+ struct sf_buf *sf_bufs;
+ vm_offset_t sf_base;
+ int i;
+
+ mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INIT(&sf_freelist.sf_head);
+ sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
+ M_NOWAIT | M_ZERO);
+ for (i = 0; i < nsfbufs; i++) {
+ sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
+ }
+ sf_buf_alloc_want = 0;
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
+ * Get an sf_buf from the freelist. Will block if none are available.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m)
+{
+ struct sf_buf *sf;
+ int error;
+
+ mtx_lock(&sf_freelist.sf_lock);
+ while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
+ sf_buf_alloc_want++;
+ error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
+ "sfbufa", 0);
+ sf_buf_alloc_want--;
+
+ /*
+ * If we got a signal, don't risk going back to sleep.
+ */
+ if (error)
+ break;
+ }
+ if (sf != NULL) {
+ SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
+ sf->m = m;
+ pmap_qenter(sf->kva, &sf->m, 1);
+ }
+ mtx_unlock(&sf_freelist.sf_lock);
+ return (sf);
+}
+
+/*
+ * Detach the mapped page and release resources back to the system.
+ */
+void
+sf_buf_free(void *addr, void *args)
+{
+ struct sf_buf *sf;
+ struct vm_page *m;
+
+ sf = args;
+ pmap_qremove((vm_offset_t)addr, 1);
+ m = sf->m;
+ vm_page_lock_queues();
+ vm_page_unwire(m, 0);
+ /*
+ * Check for the object going away on us. This can
+ * happen since we don't hold a reference to it.
+ * If so, we're responsible for freeing the page.
+ */
+ if (m->wire_count == 0 && m->object == NULL)
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ sf->m = NULL;
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
+ if (sf_buf_alloc_want > 0)
+ wakeup_one(&sf_freelist);
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
* Software interrupt handler for queued VM system processing.
*/
void
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index 983e733894e4..2abca7aa3d88 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -52,7 +52,11 @@
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
+#include <sys/kernel.h>
#include <sys/linker_set.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/user.h>
@@ -63,6 +67,7 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
+#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
@@ -81,6 +86,20 @@
#include <machine/tlb.h>
#include <machine/tstate.h>
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
+
+/*
+ * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, bundling
+ * the sf_freelist head with the sf_lock mutex.
+ */
+static struct {
+ SLIST_HEAD(, sf_buf) sf_head;
+ struct mtx sf_lock;
+} sf_freelist;
+
+static u_int sf_buf_alloc_want;
+
PMAP_STATS_VAR(uma_nsmall_alloc);
PMAP_STATS_VAR(uma_nsmall_alloc_oc);
PMAP_STATS_VAR(uma_nsmall_free);
@@ -330,6 +349,91 @@ is_physical_memory(vm_paddr_t addr)
return (0);
}
+/*
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
+static void
+sf_buf_init(void *arg)
+{
+ struct sf_buf *sf_bufs;
+ vm_offset_t sf_base;
+ int i;
+
+ mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INIT(&sf_freelist.sf_head);
+ sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
+ M_NOWAIT | M_ZERO);
+ for (i = 0; i < nsfbufs; i++) {
+ sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
+ }
+ sf_buf_alloc_want = 0;
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
+/*
+ * Get an sf_buf from the freelist. Will block if none are available.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m)
+{
+ struct sf_buf *sf;
+ int error;
+
+ mtx_lock(&sf_freelist.sf_lock);
+ while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
+ sf_buf_alloc_want++;
+ error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
+ "sfbufa", 0);
+ sf_buf_alloc_want--;
+
+ /*
+ * If we got a signal, don't risk going back to sleep.
+ */
+ if (error)
+ break;
+ }
+ if (sf != NULL) {
+ SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
+ sf->m = m;
+ pmap_qenter(sf->kva, &sf->m, 1);
+ }
+ mtx_unlock(&sf_freelist.sf_lock);
+ return (sf);
+}
+
+/*
+ * Detach the mapped page and release resources back to the system.
+ */
+void
+sf_buf_free(void *addr, void *args)
+{
+ struct sf_buf *sf;
+ struct vm_page *m;
+
+ sf = args;
+ pmap_qremove((vm_offset_t)addr, 1);
+ m = sf->m;
+ vm_page_lock_queues();
+ vm_page_unwire(m, 0);
+ /*
+ * Check for the object going away on us. This can
+ * happen since we don't hold a reference to it.
+ * If so, we're responsible for freeing the page.
+ */
+ if (m->wire_count == 0 && m->object == NULL)
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ sf->m = NULL;
+ mtx_lock(&sf_freelist.sf_lock);
+ SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
+ if (sf_buf_alloc_want > 0)
+ wakeup_one(&sf_freelist);
+ mtx_unlock(&sf_freelist.sf_lock);
+}
+
void
swi_vm(void *v)
{