diff options
| author | Brian Feldman <green@FreeBSD.org> | 1999-12-12 05:52:51 +0000 |
|---|---|---|
| committer | Brian Feldman <green@FreeBSD.org> | 1999-12-12 05:52:51 +0000 |
| commit | f48b807fc0a39a0b5826b357e4b729803e58a7ba (patch) | |
| tree | 66ccefed731dc50bd2da132fe1d54e83aea84b8d | |
| parent | a58f6db67745cac458e460409f8e6dbd8651635c (diff) | |
Notes
| -rw-r--r-- | sys/conf/param.c | 1 | ||||
| -rw-r--r-- | sys/kern/subr_param.c | 1 | ||||
| -rw-r--r-- | sys/kern/uipc_mbuf.c | 160 | ||||
| -rw-r--r-- | sys/kern/uipc_socket.c | 12 | ||||
| -rw-r--r-- | sys/kern/uipc_syscalls.c | 4 | ||||
| -rw-r--r-- | sys/sys/mbuf.h | 88 |
6 files changed, 232 insertions, 34 deletions
diff --git a/sys/conf/param.c b/sys/conf/param.c index 9a0a85f08a0a..7ae2ade9bdca 100644 --- a/sys/conf/param.c +++ b/sys/conf/param.c @@ -82,6 +82,7 @@ int maxprocperuid = NPROC-1; /* maximum # of processes per user */ int maxfiles = MAXFILES; /* system wide open files limit */ int maxfilesperproc = MAXFILES; /* per-process open files limit */ int ncallout = 16 + NPROC + MAXFILES; /* maximum # of timer events */ +int mbuf_wait = 32; /* mbuf sleep time in ticks */ /* maximum # of sf_bufs (sendfile(2) zero-copy virtual buffers) */ #ifndef NSFBUFS diff --git a/sys/kern/subr_param.c b/sys/kern/subr_param.c index 9a0a85f08a0a..7ae2ade9bdca 100644 --- a/sys/kern/subr_param.c +++ b/sys/kern/subr_param.c @@ -82,6 +82,7 @@ int maxprocperuid = NPROC-1; /* maximum # of processes per user */ int maxfiles = MAXFILES; /* system wide open files limit */ int maxfilesperproc = MAXFILES; /* per-process open files limit */ int ncallout = 16 + NPROC + MAXFILES; /* maximum # of timer events */ +int mbuf_wait = 32; /* mbuf sleep time in ticks */ /* maximum # of sf_bufs (sendfile(2) zero-copy virtual buffers) */ #ifndef NSFBUFS diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c index 1684c920d815..9650c68e8ea8 100644 --- a/sys/kern/uipc_mbuf.c +++ b/sys/kern/uipc_mbuf.c @@ -48,6 +48,10 @@ #include <vm/vm_kern.h> #include <vm/vm_extern.h> +#ifdef INVARIANTS +#include <machine/cpu.h> +#endif + static void mbinit __P((void *)); SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL) @@ -71,6 +75,8 @@ SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW, SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, ""); SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW, &max_datalen, 0, ""); +SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW, + &mbuf_wait, 0, ""); SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, ""); SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD, &nmbclusters, 0, "Maximum number of 
mbuf clusters available"); @@ -132,10 +138,15 @@ m_mballoc(nmb, how) register int i; int nbytes; - /* Once we run out of map space, it will be impossible to get - * any more (nothing is ever freed back to the map) (XXX which - * is dumb). (however you are not dead as m_reclaim might - * still be able to free a substantial amount of space). + /* + * Once we run out of map space, it will be impossible to get + * any more (nothing is ever freed back to the map) + * -- however you are not dead as m_reclaim might + * still be able to free a substantial amount of space. + * + * XXX Furthermore, we can also work with "recycled" mbufs (when + * we're calling with M_WAIT the sleep procedure will be woken + * up when an mbuf is freed. See m_mballoc_wait()). */ if (mb_map_full) return (0); @@ -164,6 +175,52 @@ m_mballoc(nmb, how) return (1); } +/* + * Once the mb_map has been exhausted and if the call to the allocation macros + * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely + * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a + * designated (mbuf_wait) time. + */ +struct mbuf * +m_mballoc_wait(int caller, int type) +{ + struct mbuf *p; + int s; + + m_mballoc_wid++; + if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK) + m_mballoc_wid--; + + /* + * Now that we (think) that we've got something, we will redo an + * MGET, but avoid getting into another instance of m_mballoc_wait() + * XXX: We retry to fetch _even_ if the sleep timed out. This is left + * this way, purposely, in the [unlikely] case that an mbuf was + * freed but the sleep was not awakened in time. + */ + p = NULL; + switch (caller) { + case MGET_C: + MGET(p, M_DONTWAIT, type); + break; + case MGETHDR_C: + MGETHDR(p, M_DONTWAIT, type); + break; + default: + panic("m_mballoc_wait: invalid caller (%d)", caller); + } + + s = splimp(); + if (p != NULL) { /* We waited and got something... 
*/ + mbstat.m_wait++; + /* Wake up another if we have more free. */ + if (mmbfree != NULL) + m_mballoc_wakeup(); + } + splx(s); + return (p); +} + #if MCLBYTES > PAGE_SIZE static int i_want_my_mcl; @@ -210,7 +267,8 @@ m_clalloc(ncl, how) /* * Once we run out of map space, it will be impossible * to get any more (nothing is ever freed back to the - * map). + * map). From this point on, we solely rely on freed + * mclusters. */ if (mb_map_full) { mbstat.m_drops++; @@ -253,6 +311,47 @@ m_clalloc(ncl, how) } /* + * Once the mb_map submap has been exhausted and the allocation is called with + * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will + * sleep for a designated amount of time (mbuf_wait) or until we're woken up + * due to sudden mcluster availability. + */ +caddr_t +m_clalloc_wait(void) +{ + caddr_t p; + int s; + +#ifdef __i386__ + /* If in interrupt context, and INVARIANTS, maintain sanity and die. */ + KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT")); +#endif + + /* Sleep until something's available or until we expire. */ + m_clalloc_wid++; + if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK) + m_clalloc_wid--; + + /* + * Now that we (think) that we've got something, we will redo an + * MGET, but avoid getting into another instance of m_clalloc_wait() + */ + p = NULL; + MCLALLOC(p, M_DONTWAIT); + + s = splimp(); + if (p != NULL) { /* We waited and got something... */ + mbstat.m_wait++; + /* Wake up another if we have more free. */ + if (mclfree != NULL) + m_clalloc_wakeup(); + } + + splx(s); + return (p); +} + +/* * When MGET fails, ask protocols to free space when short of memory, * then re-attempt to allocate an mbuf. */ @@ -265,19 +364,30 @@ m_retry(i, t) /* * Must only do the reclaim if not in an interrupt context. 
*/ - if (i == M_WAIT) + if (i == M_WAIT) { +#ifdef __i386__ + KASSERT(intr_nesting_level == 0, + ("MBALLOC: CANNOT WAIT IN INTERRUPT")); +#endif m_reclaim(); + } + + /* + * Both m_mballoc_wait and m_retry must be nulled because + * when the MGET macro is run from here, we definitely do _not_ + * want to enter an instance of m_mballoc_wait() or m_retry() (again!) + */ +#define m_mballoc_wait(caller,type) (struct mbuf *)0 #define m_retry(i, t) (struct mbuf *)0 MGET(m, i, t); #undef m_retry - if (m != NULL) { +#undef m_mballoc_wait + + if (m != NULL) mbstat.m_wait++; - } else { - if (i == M_DONTWAIT) - mbstat.m_drops++; - else - panic("Out of mbuf clusters"); - } + else + mbstat.m_drops++; + return (m); } @@ -293,19 +403,25 @@ m_retryhdr(i, t) /* * Must only do the reclaim if not in an interrupt context. */ - if (i == M_WAIT) + if (i == M_WAIT) { +#ifdef __i386__ + KASSERT(intr_nesting_level == 0, + ("MBALLOC: CANNOT WAIT IN INTERRUPT")); +#endif m_reclaim(); + } + +#define m_mballoc_wait(caller,type) (struct mbuf *)0 #define m_retryhdr(i, t) (struct mbuf *)0 MGETHDR(m, i, t); #undef m_retryhdr - if (m != NULL) { +#undef m_mballoc_wait + + if (m != NULL) mbstat.m_wait++; - } else { - if (i == M_DONTWAIT) - mbstat.m_drops++; - else - panic("Out of mbuf clusters"); - } + else + mbstat.m_drops++; + return (m); } @@ -1034,7 +1150,7 @@ void m_print(const struct mbuf *m) { int len; - const struct mbuf *m2; + struct mbuf *m2; len = m->m_pkthdr.len; m2 = m; diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c index c6d107cb72d2..9e4fa561c1be 100644 --- a/sys/kern/uipc_socket.c +++ b/sys/kern/uipc_socket.c @@ -507,11 +507,19 @@ restart: } else do { if (top == 0) { MGETHDR(m, M_WAIT, MT_DATA); + if (m == NULL) { + error = ENOBUFS; + goto release; + } mlen = MHLEN; m->m_pkthdr.len = 0; m->m_pkthdr.rcvif = (struct ifnet *)0; } else { MGET(m, M_WAIT, MT_DATA); + if (m == NULL) { + error = ENOBUFS; + goto release; + } mlen = MLEN; } if (resid >= MINCLSIZE) { @@ -636,6 
+644,10 @@ soreceive(so, psa, uio, mp0, controlp, flagsp) flags = 0; if (flags & MSG_OOB) { m = m_get(M_WAIT, MT_DATA); + if (m == NULL) { + error = ENOBUFS; + goto release; + } error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); if (error) goto bad; diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c index af24c03df2f1..268125f94773 100644 --- a/sys/kern/uipc_syscalls.c +++ b/sys/kern/uipc_syscalls.c @@ -1615,6 +1615,10 @@ retry_lookup: * Get an mbuf header and set it up as having external storage. */ MGETHDR(m, M_WAIT, MT_DATA); + if (m == NULL) { + error = ENOBUFS; + goto done; + } m->m_ext.ext_free = sf_buf_free; m->m_ext.ext_ref = sf_buf_ref; m->m_ext.ext_buf = (void *)sf->kva; diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h index a4002aad28f5..b4f5396c9937 100644 --- a/sys/sys/mbuf.h +++ b/sys/sys/mbuf.h @@ -157,6 +157,24 @@ struct mbuf { #define M_DONTWAIT 1 #define M_WAIT 0 +/* mbuf and mbuf cluster wait count variables... */ +static u_int m_mballoc_wid = 0, m_clalloc_wid = 0; + +static __inline void m_mballoc_wakeup __P((void)); +static __inline void m_clalloc_wakeup __P((void)); +/* We'll need wakeup_one(). */ +#ifdef KERNEL +#include <sys/systm.h> +#endif + +/* + * Identifying number passed to the m_mballoc_wait function, allowing + * us to determine that the call came from an MGETHDR and not an MGET -- + * this way we are sure to run the MGETHDR macro when the call came from there. + */ +#define MGETHDR_C 1 +#define MGET_C 2 + /* Freelists: * * Normal mbuf clusters are normally treated as character arrays @@ -169,6 +187,37 @@ union mcluster { }; /* + * mbuf and mbuf cluster wakeup inline routines. + */ +/* + * Wakeup the next instance -- if any -- of m_mballoc_wait() which + * is waiting for an mbuf to be freed. Make sure to decrement sleep count. + * XXX: If there is another free mbuf, this routine will be called [again] + * from the m_mballoc_wait routine in order to wake another sleep instance. 
+ * Should be called at splimp() + */ +static __inline void +m_mballoc_wakeup(void) +{ + if (m_mballoc_wid) { + m_mballoc_wid--; + wakeup_one(&m_mballoc_wid); + } +} + +/* + * Same as above, only for mbuf cluster(s). Should be called at splimp() + */ +static __inline void +m_clalloc_wakeup(void) +{ + if (m_clalloc_wid) { + m_clalloc_wid--; + wakeup_one(&m_clalloc_wid); + } +} + +/* * mbuf utility macros: * * MBUFLOCK(code) @@ -191,7 +240,8 @@ union mcluster { * allocates an mbuf and initializes it to contain a packet header * and internal data. */ -#define MGET(m, how, type) { \ +#define MGET(m, how, type) \ + do { \ int _ms = splimp(); \ if (mmbfree == 0) \ (void)m_mballoc(1, (how)); \ @@ -205,13 +255,15 @@ union mcluster { (m)->m_data = (m)->m_dat; \ (m)->m_flags = 0; \ splx(_ms); \ - } else { \ + } else { \ splx(_ms); \ - (m) = m_retry((how), (type)); \ - } \ -} + if (((m) = m_retry((how), (type))) == NULL && (how) == M_WAIT) \ + (m) = m_mballoc_wait(MGET_C, (type)); \ + } \ + } while (0) -#define MGETHDR(m, how, type) { \ +#define MGETHDR(m, how, type) \ + do { \ int _ms = splimp(); \ if (mmbfree == 0) \ (void)m_mballoc(1, (how)); \ @@ -225,11 +277,13 @@ union mcluster { (m)->m_data = (m)->m_pktdat; \ (m)->m_flags = M_PKTHDR; \ splx(_ms); \ - } else { \ + } else { \ splx(_ms); \ - (m) = m_retryhdr((how), (type)); \ - } \ -} + if (((m) = m_retryhdr((how), (type))) == NULL && \ + (how) == M_WAIT) \ + (m) = m_mballoc_wait(MGETHDR_C, (type)); \ + } \ + } while (0) /* * Mbuf cluster macros. @@ -240,15 +294,20 @@ union mcluster { * freeing the cluster if the reference count has reached 0. 
*/ #define MCLALLOC(p, how) \ - MBUFLOCK( \ + do { \ + int _ms = splimp(); \ if (mclfree == 0) \ (void)m_clalloc(1, (how)); \ if (((p) = (caddr_t)mclfree) != 0) { \ ++mclrefcnt[mtocl(p)]; \ mbstat.m_clfree--; \ mclfree = ((union mcluster *)(p))->mcl_next; \ + splx(_ms); \ + } else if ((how) == M_WAIT) { \ + splx(_ms); \ + (p) = m_clalloc_wait(); \ } \ - ) + } while (0) #define MCLGET(m, how) \ { MCLALLOC((m)->m_ext.ext_buf, (how)); \ @@ -267,6 +326,7 @@ union mcluster { ((union mcluster *)(p))->mcl_next = mclfree; \ mclfree = (union mcluster *)(p); \ mbstat.m_clfree++; \ + m_clalloc_wakeup(); \ } \ } while (0) @@ -307,6 +367,7 @@ union mcluster { mbstat.m_mtypes[MT_FREE]++; \ (m)->m_next = mmbfree; \ mmbfree = (m); \ + m_mballoc_wakeup(); \ ) /* @@ -412,7 +473,9 @@ extern int max_linkhdr; /* largest link-level header */ extern int max_protohdr; /* largest protocol header */ extern int max_hdr; /* largest link+protocol header */ extern int max_datalen; /* MHLEN - max_hdr */ +extern int mbuf_wait; /* mbuf sleep time */ +struct mbuf *m_mballoc_wait __P((int, int)); struct mbuf *m_copym __P((struct mbuf *, int, int, int)); struct mbuf *m_copypacket __P((struct mbuf *, int)); struct mbuf *m_devget __P((char *, int, int, struct ifnet *, @@ -432,6 +495,7 @@ void m_adj __P((struct mbuf *, int)); void m_cat __P((struct mbuf *,struct mbuf *)); int m_mballoc __P((int, int)); int m_clalloc __P((int, int)); +caddr_t m_clalloc_wait __P((void)); void m_copyback __P((struct mbuf *, int, int, caddr_t)); void m_copydata __P((struct mbuf *,int,int,caddr_t)); void m_freem __P((struct mbuf *)); |
