Diffstat (limited to 'net/pf_norm.c')
-rw-r--r--  net/pf_norm.c  32
1 file changed, 4 insertions, 28 deletions
diff --git a/net/pf_norm.c b/net/pf_norm.c
index df339ae6f691..ab3a161f83de 100644
--- a/net/pf_norm.c
+++ b/net/pf_norm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf_norm.c,v 1.107 2006/04/16 00:59:52 pascoe Exp $ */
+/* $OpenBSD: pf_norm.c,v 1.109 2007/05/28 17:16:39 henning Exp $ */
/*
* Copyright 2001 Niels Provos <provos@citi.umich.edu>
@@ -929,18 +929,6 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
if (m == NULL)
return (PF_DROP);
- /* use mtag from concatenated mbuf chain */
- pd->pf_mtag = pf_find_mtag(m);
-#ifdef DIAGNOSTIC
- if (pd->pf_mtag == NULL) {
- printf("%s: pf_find_mtag returned NULL(1)\n", __func__);
- if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
- m_freem(m);
- *m0 = NULL;
- goto no_mem;
- }
- }
-#endif
if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
goto drop;
@@ -949,7 +937,7 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
/* non-buffering fragment cache (drops or masks overlaps) */
int nomem = 0;
- if (dir == PF_OUT && pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
+ if (dir == PF_OUT && m->m_pkthdr.pf.flags & PF_TAG_FRAGCACHE) {
/*
* Already passed the fragment cache in the
* input direction. If we continued, it would
@@ -976,20 +964,8 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
goto drop;
}
- /* use mtag from copied and trimmed mbuf chain */
- pd->pf_mtag = pf_find_mtag(m);
-#ifdef DIAGNOSTIC
- if (pd->pf_mtag == NULL) {
- printf("%s: pf_find_mtag returned NULL(2)\n", __func__);
- if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
- m_freem(m);
- *m0 = NULL;
- goto no_mem;
- }
- }
-#endif
if (dir == PF_IN)
- pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;
+ m->m_pkthdr.pf.flags |= PF_TAG_FRAGCACHE;
if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
goto drop;
@@ -1658,7 +1634,7 @@ pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
* network conditions that re-order packets and
* cause our view of them to decrease. For now the
* only lowerbound we can safely determine is that
- * the TS echo will never be less than the orginal
+ * the TS echo will never be less than the original
* TS. XXX There is probably a better lowerbound.
* Remove TS_MAX_CONN with better lowerbound check.
* tescr >= other original TS
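The substantive change in this diff is that pf's per-packet flags are now read from and written to m->m_pkthdr.pf.flags directly, instead of living in a separately allocated mbuf tag. That is why the pf_find_mtag()/pf_get_mtag() re-lookups (and their NULL/allocation-failure handling) after the fragment cache rebuilds the mbuf chain can simply be dropped: state embedded in the packet header travels with the packet. The sketch below only illustrates that pattern in userland; the struct names (toy_mbuf, toy_pkthdr_pf) and the flag value are stand-ins, not the kernel's actual layout or API.

/*
 * Illustrative sketch only -- not kernel code. toy_mbuf and toy_pkthdr_pf
 * are hypothetical stand-ins for struct mbuf and its m_pkthdr.pf member;
 * PF_TAG_FRAGCACHE mirrors the role of the real flag.
 */
#include <stdio.h>

#define PF_TAG_FRAGCACHE 0x01   /* packet already went through the frag cache */

struct toy_pkthdr_pf {
	int flags;              /* pf flags carried in the packet header */
};

struct toy_mbuf {
	struct toy_pkthdr_pf pf; /* embedded state: no tag lookup needed */
};

/* New style, as in the diff: set the flag directly on inbound packets. */
static void
mark_fragcache(struct toy_mbuf *m, int dir_in)
{
	if (dir_in)
		m->pf.flags |= PF_TAG_FRAGCACHE;
}

/* New style: test the flag directly when the packet comes back outbound. */
static int
seen_fragcache(const struct toy_mbuf *m)
{
	return (m->pf.flags & PF_TAG_FRAGCACHE) != 0;
}

int
main(void)
{
	struct toy_mbuf m = { .pf = { .flags = 0 } };

	mark_fragcache(&m, 1);  /* inbound pass through the fragment cache */
	printf("fragcache flag set: %d\n", seen_fragcache(&m));
	return 0;
}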