-/* $OpenBSD: pf.c,v 1.1033 2017/05/31 09:19:10 bluhm Exp $ */
+/* $OpenBSD: pf.c,v 1.1034 2017/06/05 22:18:28 sashan Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
pf_state_insert(struct pfi_kif *kif, struct pf_state_key **skw,
struct pf_state_key **sks, struct pf_state *s)
{
- NET_ASSERT_LOCKED();
+ PF_ASSERT_LOCKED();
s->kif = kif;
if (*skw == *sks) {
{
struct pf_rule *r;
- NET_ASSERT_LOCKED();
+ PF_ASSERT_LOCKED();
if (SLIST_EMPTY(&pf_rule_gcl))
return;
NET_LOCK(s);
+ PF_LOCK();
/* process a fraction of the state table every second */
pf_purge_expired_states(1 + (pf_status.states
/ pf_default_rule.timeout[PFTM_INTERVAL]));
/* purge other expired types every PFTM_INTERVAL seconds */
if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
- pf_purge_expired_fragments();
pf_purge_expired_src_nodes(0);
pf_purge_expired_rules();
+ }
+ PF_UNLOCK();
+ /*
+ * Fragments don't require PF_LOCK(), they use their own lock.
+ */
+ if (nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
+ pf_purge_expired_fragments();
nloops = 0;
}
{
struct pf_src_node *cur, *next;
- NET_ASSERT_LOCKED();
+ PF_ASSERT_LOCKED();
for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
void
pf_remove_state(struct pf_state *cur)
{
- NET_ASSERT_LOCKED();
+ PF_ASSERT_LOCKED();
/* handle load balancing related tasks */
pf_postprocess_addr(cur);
{
struct pf_rule_item *ri;
- NET_ASSERT_LOCKED();
+ PF_ASSERT_LOCKED();
#if NPFSYNC > 0
if (pfsync_state_in_use(cur))
static struct pf_state *cur = NULL;
struct pf_state *next;
- NET_ASSERT_LOCKED();
+ PF_ASSERT_LOCKED();
while (maxcheck--) {
/* wrap to start of list when we hit the end */
case IPPROTO_TCP:
sport = pd->hdr.tcp.th_sport;
dport = pd->hdr.tcp.th_dport;
- NET_ASSERT_LOCKED();
+ PF_ASSERT_LOCKED();
tb = &tcbtable;
break;
case IPPROTO_UDP:
sport = pd->hdr.udp.uh_sport;
dport = pd->hdr.udp.uh_dport;
- NET_ASSERT_LOCKED();
+ PF_ASSERT_LOCKED();
tb = &udbtable;
break;
default:
/* if packet sits in reassembly queue, return without error */
if (pd.m == NULL)
return PF_PASS;
+
if (action != PF_PASS) {
#if NPFLOG > 0
pd.pflog |= PF_LOG_FORCE;
}
pd.m->m_pkthdr.pf.flags |= PF_TAG_PROCESSED;
+ /* lock the lookup/write section of pf_test() */
+ PF_LOCK();
+
switch (pd.virtual_proto) {
case PF_VPROTO_FRAGMENT: {
REASON_SET(&reason, PFRES_NORM);
DPFPRINTF(LOG_NOTICE,
"dropping IPv6 packet with ICMPv4 payload");
- goto done;
+ goto unlock;
}
action = pf_test_state_icmp(&pd, &s, &reason);
if (action == PF_PASS || action == PF_AFRT) {
REASON_SET(&reason, PFRES_NORM);
DPFPRINTF(LOG_NOTICE,
"dropping IPv4 packet with ICMPv6 payload");
- goto done;
+ goto unlock;
}
action = pf_test_state_icmp(&pd, &s, &reason);
if (action == PF_PASS || action == PF_AFRT) {
pqid = 1;
action = pf_normalize_tcp(&pd);
if (action == PF_DROP)
- goto done;
+ goto unlock;
}
action = pf_test_state(&pd, &s, &reason);
if (action == PF_PASS || action == PF_AFRT) {
break;
}
+unlock:
+ PF_UNLOCK();
+
+ /*
+ * At the moment, we rely on NET_LOCK() to prevent removal of items
+ * we've collected above ('s', 'r', 'anchor' and 'ruleset'). They'll
+ * have to be refcounted when NET_LOCK() is gone.
+ */
+
done:
if (action != PF_DROP) {
if (s) {
}
*m0 = pd.m;
+
return (action);
}
-/* $OpenBSD: pf_ioctl.c,v 1.314 2017/06/01 14:38:28 patrick Exp $ */
+/* $OpenBSD: pf_ioctl.c,v 1.315 2017/06/05 22:18:28 sashan Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
+#ifdef WITH_PF_LOCK
+struct rwlock pf_lock = RWLOCK_INITIALIZER("pf_lock");
+#endif /* WITH_PF_LOCK */
+
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
}
NET_LOCK(s);
+ PF_LOCK();
switch (cmd) {
case DIOCSTART:
break;
}
fail:
+ PF_UNLOCK();
NET_UNLOCK(s);
return (error);
}
-/* $OpenBSD: pf_norm.c,v 1.204 2017/05/15 12:26:00 mpi Exp $ */
+/* $OpenBSD: pf_norm.c,v 1.205 2017/06/05 22:18:28 sashan Exp $ */
/*
* Copyright 2001 Niels Provos <provos@citi.umich.edu>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/syslog.h>
+#include <sys/mutex.h>
#include <net/if.h>
#include <net/if_var.h>
struct pool pf_state_scrub_pl;
int pf_nfrents;
+#ifdef WITH_PF_LOCK
+struct mutex pf_frag_mtx;
+
+#define PF_FRAG_LOCK_INIT() mtx_init(&pf_frag_mtx, IPL_SOFTNET)
+#define PF_FRAG_LOCK() mtx_enter(&pf_frag_mtx)
+#define PF_FRAG_UNLOCK() mtx_leave(&pf_frag_mtx)
+#else /* !WITH_PF_LOCK */
+#define PF_FRAG_LOCK_INIT() (void)(0)
+#define PF_FRAG_LOCK() (void)(0)
+#define PF_FRAG_UNLOCK() (void)(0)
+#endif /* WITH_PF_LOCK */
+
void
pf_normalize_init(void)
{
pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
TAILQ_INIT(&pf_fragqueue);
+
+ PF_FRAG_LOCK_INIT();
}
static __inline int
struct pf_fragment *frag;
int32_t expire;
- NET_ASSERT_LOCKED();
+ PF_ASSERT_UNLOCKED();
expire = time_uptime - pf_default_rule.timeout[PFTM_FRAG];
+
+ PF_FRAG_LOCK();
while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
if (frag->fr_timeout > expire)
break;
DPFPRINTF(LOG_NOTICE, "expiring %d(%p)", frag->fr_id, frag);
pf_free_fragment(frag);
}
+ PF_FRAG_UNLOCK();
}
/*
key.fr_id = ip->ip_id;
key.fr_direction = dir;
- if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
+ PF_FRAG_LOCK();
+ if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
+ PF_FRAG_UNLOCK();
return (PF_DROP);
+ }
/* The mbuf is part of the fragment entry, no direct free or access */
m = *m0 = NULL;
- if (!pf_isfull_fragment(frag))
+ if (!pf_isfull_fragment(frag)) {
+ PF_FRAG_UNLOCK();
return (PF_PASS); /* drop because *m0 is NULL, no error */
+ }
/* We have all the data */
frent = TAILQ_FIRST(&frag->fr_queue);
ip->ip_off &= ~(IP_MF|IP_OFFMASK);
if (hdrlen + total > IP_MAXPACKET) {
+ PF_FRAG_UNLOCK();
DPFPRINTF(LOG_NOTICE, "drop: too big: %d", total);
ip->ip_len = 0;
REASON_SET(reason, PFRES_SHORT);
return (PF_DROP);
}
+ PF_FRAG_UNLOCK();
DPFPRINTF(LOG_INFO, "complete: %p(%d)", m, ntohs(ip->ip_len));
return (PF_PASS);
}
key.fr_id = fraghdr->ip6f_ident;
key.fr_direction = dir;
- if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
+ PF_FRAG_LOCK();
+ if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
+ PF_FRAG_UNLOCK();
return (PF_DROP);
+ }
/* The mbuf is part of the fragment entry, no direct free or access */
m = *m0 = NULL;
- if (!pf_isfull_fragment(frag))
+ if (!pf_isfull_fragment(frag)) {
+ PF_FRAG_UNLOCK();
return (PF_PASS); /* drop because *m0 is NULL, no error */
+ }
/* We have all the data */
extoff = frent->fe_extoff;
ip6->ip6_nxt = proto;
if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
+ PF_FRAG_UNLOCK();
DPFPRINTF(LOG_NOTICE, "drop: too big: %d", total);
ip6->ip6_plen = 0;
REASON_SET(reason, PFRES_SHORT);
/* PF_DROP requires a valid mbuf *m0 in pf_test6() */
return (PF_DROP);
}
+ PF_FRAG_UNLOCK();
DPFPRINTF(LOG_INFO, "complete: %p(%d)", m, ntohs(ip6->ip6_plen));
return (PF_PASS);
fail:
+ PF_FRAG_UNLOCK();
REASON_SET(reason, PFRES_MEMORY);
/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later */
return (PF_DROP);
-/* $OpenBSD: pfvar_priv.h,v 1.2 2016/11/22 19:29:54 procter Exp $ */
+/* $OpenBSD: pfvar_priv.h,v 1.3 2017/06/05 22:18:28 sashan Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
#ifdef _KERNEL
+#include <sys/rwlock.h>
+
struct pf_pdesc {
struct {
int done;
} hdr;
};
+#ifdef WITH_PF_LOCK
+extern struct rwlock pf_lock;
+
+#define PF_LOCK() do { \
+ NET_ASSERT_LOCKED(); \
+ rw_enter_write(&pf_lock); \
+ } while (0)
+
+#define PF_UNLOCK() do { \
+ PF_ASSERT_LOCKED(); \
+ rw_exit_write(&pf_lock); \
+ } while (0)
+
+#define PF_ASSERT_LOCKED() do { \
+ if (rw_status(&pf_lock) != RW_WRITE) \
+ splassert_fail(RW_WRITE, \
+ rw_status(&pf_lock),__func__);\
+ } while (0)
+
+#define PF_ASSERT_UNLOCKED() do { \
+ if (rw_status(&pf_lock) == RW_WRITE) \
+ splassert_fail(0, rw_status(&pf_lock), __func__);\
+ } while (0)
+#else /* !WITH_PF_LOCK */
+#define PF_LOCK() (void)(0)
+#define PF_UNLOCK() (void)(0)
+#define PF_ASSERT_LOCKED() (void)(0)
+#define PF_ASSERT_UNLOCKED() (void)(0)
+#endif /* WITH_PF_LOCK */
+
#endif /* _KERNEL */
#endif /* _NET_PFVAR_PRIV_H_ */