Diffstat (limited to 'sys/powerpc/aim/vm_machdep.c')
-rw-r--r--  sys/powerpc/aim/vm_machdep.c  145
1 file changed, 137 insertions(+), 8 deletions(-)
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index 9b0faf34fa76..e20ba7b99077 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -101,6 +101,37 @@
#include <vm/vm_extern.h>
/*
+ * On systems without a direct mapped region (e.g. PPC64),
+ * we use the same code as the Book E implementation. Since
+ * we need to have runtime detection of this, define some machinery
+ * for sf_bufs in this case, and ignore it on systems with direct maps.
+ */
+
+#ifndef NSFBUFS
+#define NSFBUFS (512 + maxusers * 16)
+#endif
+
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);
+
+LIST_HEAD(sf_head, sf_buf);
+
+/* A hash table of active sendfile(2) buffers */
+static struct sf_head *sf_buf_active;
+static u_long sf_buf_hashmask;
+
+#define SF_BUF_HASH(m) (((m) - vm_page_array) & sf_buf_hashmask)
+
+static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
+static u_int sf_buf_alloc_want;
+
+/*
+ * A lock used to synchronize access to the hash table and free list
+ */
+static struct mtx sf_buf_lock;
+
+
+/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the pcb, set up the stack so that the child is
* ready to run and return to user mode.
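The declarations added in the hunk above back the machine-independent sf_buf(9) KPI (sf_buf_alloc(), sf_buf_kva(), sf_buf_free()) on PPC64 machines that lack a direct map. A schematic kernel-side caller, purely for illustration: fill_page() and its arguments are hypothetical, and the snippet only builds inside a kernel source tree:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sf_buf.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

static void
fill_page(struct vm_page *m, const void *src, size_t len)
{
	struct sf_buf *sf;
	vm_offset_t va;

	/* With flags 0 this may sleep until an sf_buf (and its KVA) is free. */
	sf = sf_buf_alloc(m, 0);
	va = sf_buf_kva(sf);		/* temporary kernel mapping of m */

	memcpy((void *)va, src, len);

	/* Drop the reference; the mapping stays cached for later reuse. */
	sf_buf_free(sf);
}

On systems with a direct map the same calls still work: sf_buf_alloc() hands back the vm_page pointer itself and the address comes straight from the direct mapping, as the hw_direct_map shortcuts below show.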
@@ -202,24 +233,122 @@ cpu_reset()
}
/*
- * Allocate an sf_buf for the given vm_page. On this machine, however, there
- * is no sf_buf object. Instead, an opaque pointer to the given vm_page is
- * returned.
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
*/
-struct sf_buf *
-sf_buf_alloc(struct vm_page *m, int pri)
+static void
+sf_buf_init(void *arg)
{
+	struct sf_buf *sf_bufs;
+	vm_offset_t sf_base;
+	int i;
+
+	/* Don't bother on systems with a direct map */
-	return ((struct sf_buf *)m);
+	if (hw_direct_map)
+		return;
+
+	nsfbufs = NSFBUFS;
+	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);
+
+	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
+	TAILQ_INIT(&sf_buf_freelist);
+	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_NOWAIT | M_ZERO);
+
+	for (i = 0; i < nsfbufs; i++) {
+		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
+		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
+	}
+	sf_buf_alloc_want = 0;
+	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
+}
+
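sf_buf_init() above reserves nsfbufs pages worth of kernel virtual address space plus an array of nsfbufs sf_buf headers. A stand-alone back-of-the-envelope check of the default NSFBUFS sizing; the maxusers value is only an assumed example and PAGE_SIZE is taken as 4 KiB:

#include <stdio.h>

int
main(void)
{
	int maxusers = 64;			/* assumed example value */
	int nsfbufs = 512 + maxusers * 16;	/* the NSFBUFS default above */
	long kva_kb = (long)nsfbufs * 4096 / 1024;	/* 4 KiB pages */

	/* 1536 buffers -> 6144 KiB of kernel virtual address space */
	printf("%d sf_bufs, %ld KiB of KVA reserved\n", nsfbufs, kva_kb);
	return (0);
}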
+/*
+ * Get an sf_buf from the freelist. Will block if none are available.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m, int flags)
+{
+	struct sf_head *hash_list;
+	struct sf_buf *sf;
+	int error;
+
+	if (hw_direct_map) {
+		/* Shortcut the direct mapped case */
+
+		return ((struct sf_buf *)m);
+	}
+
+	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
+	mtx_lock(&sf_buf_lock);
+	LIST_FOREACH(sf, hash_list, list_entry) {
+		if (sf->m == m) {
+			sf->ref_count++;
+			if (sf->ref_count == 1) {
+				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
+				nsfbufsused++;
+				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
+			}
+			goto done;
+		}
+	}
+
+	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
+		if (flags & SFB_NOWAIT)
+			goto done;
+
+		sf_buf_alloc_want++;
+		mbstat.sf_allocwait++;
+		error = msleep(&sf_buf_freelist, &sf_buf_lock,
+		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
+		sf_buf_alloc_want--;
+
+		/*
+		 * If we got a signal, don't risk going back to sleep.
+		 */
+		if (error)
+			goto done;
+	}
+
+	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
+	if (sf->m != NULL)
+		LIST_REMOVE(sf, list_entry);
+
+	LIST_INSERT_HEAD(hash_list, sf, list_entry);
+	sf->ref_count = 1;
+	sf->m = m;
+	nsfbufsused++;
+	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
+	pmap_qenter(sf->kva, &sf->m, 1);
+done:
+	mtx_unlock(&sf_buf_lock);
+	return (sf);
}
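Two points about the flags handling above: SFB_NOWAIT makes the function return NULL instead of sleeping when the free list is empty, and SFB_CATCH makes the sleep interruptible, so a signal can also produce a NULL return. A schematic caller covering both cases; try_map_page() is a hypothetical wrapper and the headers are the same as in the earlier sketch:

/* Hypothetical wrapper; NULL means no buffer could be mapped right now. */
static struct sf_buf *
try_map_page(struct vm_page *m, int can_sleep)
{

	if (!can_sleep) {
		/* Never blocks: NULL when every sf_buf is in use. */
		return (sf_buf_alloc(m, SFB_NOWAIT));
	}

	/* Blocks on the free list, but a pending signal aborts the wait. */
	return (sf_buf_alloc(m, SFB_CATCH));
}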
/*
- * Free the sf_buf. In fact, do nothing because there are no resources
- * associated with the sf_buf.
+ * Detach mapped page and release resources back to the system.
+ *
+ * Remove a reference from the given sf_buf, adding it to the free
+ * list when its reference count reaches zero. A freed sf_buf still,
+ * however, retains its virtual-to-physical mapping until it is
+ * recycled or reactivated by sf_buf_alloc(9).
*/
void
sf_buf_free(struct sf_buf *sf)
{
+	if (hw_direct_map)
+		return;
+
+	mtx_lock(&sf_buf_lock);
+	sf->ref_count--;
+	if (sf->ref_count == 0) {
+		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
+		nsfbufsused--;
+
+		if (sf_buf_alloc_want > 0)
+			wakeup_one(&sf_buf_freelist);
+	}
+	mtx_unlock(&sf_buf_lock);
}
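Together, sf_buf_alloc() and sf_buf_free() above form a small mapping cache: a freed buffer goes to the tail of the free list but stays in its hash bucket, so a later request for the same page gets the cached KVA mapping back without another pmap_qenter(). A compact user-space model of that policy; every name and size below is invented for illustration, and the kernel hashes on the page's index into vm_page_array rather than a plain integer:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUFS	 4		/* tiny pool, enough to show recycling */
#define NBUCKETS 8		/* power of two so the mask below works */

struct buf {
	LIST_ENTRY(buf)	 hash_link;	/* bucket of pages with a cached mapping */
	TAILQ_ENTRY(buf) free_link;	/* free list, oldest mapping first */
	long		 page;		/* stands in for the vm_page index */
	int		 ref_count;
	int		 kva_slot;	/* stands in for the reserved KVA page */
};

LIST_HEAD(bucket_head, buf);

static struct bucket_head buckets[NBUCKETS];
static TAILQ_HEAD(, buf) freelist = TAILQ_HEAD_INITIALIZER(freelist);

static void
pool_init(void)
{
	for (int i = 0; i < NBUFS; i++) {
		struct buf *b = calloc(1, sizeof(*b));

		b->page = -1;		/* nothing cached yet */
		b->kva_slot = i;
		TAILQ_INSERT_TAIL(&freelist, b, free_link);
	}
}

static struct buf *
buf_alloc(long page)
{
	struct bucket_head *bucket = &buckets[page & (NBUCKETS - 1)];
	struct buf *b;

	/* Cached mapping: just take another reference. */
	LIST_FOREACH(b, bucket, hash_link)
		if (b->page == page) {
			if (b->ref_count++ == 0)
				TAILQ_REMOVE(&freelist, b, free_link);
			return (b);
		}

	/* Otherwise recycle the buffer that was freed longest ago. */
	if ((b = TAILQ_FIRST(&freelist)) == NULL)
		return (NULL);			/* pool exhausted (the SFB_NOWAIT case) */
	TAILQ_REMOVE(&freelist, b, free_link);
	if (b->page != -1)
		LIST_REMOVE(b, hash_link);	/* evict its old cached mapping */
	LIST_INSERT_HEAD(bucket, b, hash_link);
	b->page = page;				/* the kernel would pmap_qenter() here */
	b->ref_count = 1;
	return (b);
}

static void
buf_free(struct buf *b)
{
	/* Last reference: keep the mapping, park the buffer on the tail. */
	if (--b->ref_count == 0)
		TAILQ_INSERT_TAIL(&freelist, b, free_link);
}

int
main(void)
{
	pool_init();

	struct buf *a = buf_alloc(42);
	buf_free(a);
	struct buf *b = buf_alloc(42);	/* same page: the cached slot is reused */

	printf("page 42 mapped at slot %d, remapped at slot %d\n",
	    a->kva_slot, b->kva_slot);
	return (0);
}

Recycling from the head of the free list evicts the mapping that was freed longest ago, which is the same ordering the TAILQ insert-at-tail/remove-from-head pattern gives the real code.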
/*