-/* $OpenBSD: if_pfsync.c,v 1.306 2022/11/05 22:33:11 jan Exp $ */
+/* $OpenBSD: if_pfsync.c,v 1.307 2022/11/06 18:05:05 dlg Exp $ */
/*
* Copyright (c) 2002 Michael Shalayeff
void pfsyncattach(int);
int pfsync_clone_create(struct if_clone *, int);
int pfsync_clone_destroy(struct ifnet *);
-int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
- struct pf_state_peer *);
void pfsync_update_net_tdb(struct pfsync_tdb *);
int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
struct rtentry *);
sc->sc_sync_ifidx = 0;
}
-int
-pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
- struct pf_state_peer *d)
-{
- if (s->scrub.scrub_flag && d->scrub == NULL) {
- d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
- if (d->scrub == NULL)
- return (ENOMEM);
- }
-
- return (0);
-}
-
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
pf_state_export(sp, st);
}
-int
-pfsync_state_import(struct pfsync_state *sp, int flags)
-{
- struct pf_state *st = NULL;
- struct pf_state_key *skw = NULL, *sks = NULL;
- struct pf_rule *r = NULL;
- struct pfi_kif *kif;
- int pool_flags;
- int error = ENOMEM;
- int n = 0;
-
- if (sp->creatorid == 0) {
- DPFPRINTF(LOG_NOTICE, "pfsync_state_import: "
- "invalid creator id: %08x", ntohl(sp->creatorid));
- return (EINVAL);
- }
-
- if ((kif = pfi_kif_get(sp->ifname, NULL)) == NULL) {
- DPFPRINTF(LOG_NOTICE, "pfsync_state_import: "
- "unknown interface: %s", sp->ifname);
- if (flags & PFSYNC_SI_IOCTL)
- return (EINVAL);
- return (0); /* skip this state */
- }
-
- if (sp->af == 0)
- return (0); /* skip this state */
-
- /*
- * If the ruleset checksums match or the state is coming from the ioctl,
- * it's safe to associate the state with the rule of that number.
- */
- if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
- (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
- pf_main_ruleset.rules.active.rcount) {
- TAILQ_FOREACH(r, pf_main_ruleset.rules.active.ptr, entries)
- if (ntohl(sp->rule) == n++)
- break;
- } else
- r = &pf_default_rule;
-
- if ((r->max_states && r->states_cur >= r->max_states))
- goto cleanup;
-
- if (flags & PFSYNC_SI_IOCTL)
- pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
- else
- pool_flags = PR_NOWAIT | PR_LIMITFAIL | PR_ZERO;
-
- if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
- goto cleanup;
-
- if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
- goto cleanup;
-
- if ((sp->key[PF_SK_WIRE].af &&
- (sp->key[PF_SK_WIRE].af != sp->key[PF_SK_STACK].af)) ||
- PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
- &sp->key[PF_SK_STACK].addr[0], sp->af) ||
- PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
- &sp->key[PF_SK_STACK].addr[1], sp->af) ||
- sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
- sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1] ||
- sp->key[PF_SK_WIRE].rdomain != sp->key[PF_SK_STACK].rdomain) {
- if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
- goto cleanup;
- } else
- sks = skw;
-
- /* allocate memory for scrub info */
- if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
- pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
- goto cleanup;
-
- /* copy to state key(s) */
- skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
- skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
- skw->port[0] = sp->key[PF_SK_WIRE].port[0];
- skw->port[1] = sp->key[PF_SK_WIRE].port[1];
- skw->rdomain = ntohs(sp->key[PF_SK_WIRE].rdomain);
- PF_REF_INIT(skw->refcnt);
- skw->proto = sp->proto;
- if (!(skw->af = sp->key[PF_SK_WIRE].af))
- skw->af = sp->af;
- if (sks != skw) {
- sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
- sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
- sks->port[0] = sp->key[PF_SK_STACK].port[0];
- sks->port[1] = sp->key[PF_SK_STACK].port[1];
- sks->rdomain = ntohs(sp->key[PF_SK_STACK].rdomain);
- PF_REF_INIT(sks->refcnt);
- if (!(sks->af = sp->key[PF_SK_STACK].af))
- sks->af = sp->af;
- if (sks->af != skw->af) {
- switch (sp->proto) {
- case IPPROTO_ICMP:
- sks->proto = IPPROTO_ICMPV6;
- break;
- case IPPROTO_ICMPV6:
- sks->proto = IPPROTO_ICMP;
- break;
- default:
- sks->proto = sp->proto;
- }
- } else
- sks->proto = sp->proto;
-
- if (((sks->af != AF_INET) && (sks->af != AF_INET6)) ||
- ((skw->af != AF_INET) && (skw->af != AF_INET6))) {
- error = EINVAL;
- goto cleanup;
- }
-
- } else if ((sks->af != AF_INET) && (sks->af != AF_INET6)) {
- error = EINVAL;
- goto cleanup;
- }
- st->rtableid[PF_SK_WIRE] = ntohl(sp->rtableid[PF_SK_WIRE]);
- st->rtableid[PF_SK_STACK] = ntohl(sp->rtableid[PF_SK_STACK]);
-
- /* copy to state */
- st->rt_addr = sp->rt_addr;
- st->rt = sp->rt;
- st->creation = getuptime() - ntohl(sp->creation);
- st->expire = getuptime();
- if (ntohl(sp->expire)) {
- u_int32_t timeout;
-
- timeout = r->timeout[sp->timeout];
- if (!timeout)
- timeout = pf_default_rule.timeout[sp->timeout];
-
- /* sp->expire may have been adaptively scaled by export. */
- st->expire -= timeout - ntohl(sp->expire);
- }
-
- st->direction = sp->direction;
- st->log = sp->log;
- st->timeout = sp->timeout;
- st->state_flags = ntohs(sp->state_flags);
- st->max_mss = ntohs(sp->max_mss);
- st->min_ttl = sp->min_ttl;
- st->set_tos = sp->set_tos;
- st->set_prio[0] = sp->set_prio[0];
- st->set_prio[1] = sp->set_prio[1];
-
- st->id = sp->id;
- st->creatorid = sp->creatorid;
- pf_state_peer_ntoh(&sp->src, &st->src);
- pf_state_peer_ntoh(&sp->dst, &st->dst);
-
- st->rule.ptr = r;
- st->anchor.ptr = NULL;
-
- st->pfsync_time = getuptime();
- st->sync_state = PFSYNC_S_NONE;
-
- refcnt_init(&st->refcnt);
-
- /* XXX when we have anchors, use STATE_INC_COUNTERS */
- r->states_cur++;
- r->states_tot++;
-
- if (!ISSET(flags, PFSYNC_SI_IOCTL))
- SET(st->state_flags, PFSTATE_NOSYNC);
-
- /*
- * We just set PFSTATE_NOSYNC bit, which prevents
- * pfsync_insert_state() to insert state to pfsync.
- */
- if (pf_state_insert(kif, &skw, &sks, st) != 0) {
- /* XXX when we have anchors, use STATE_DEC_COUNTERS */
- r->states_cur--;
- error = EEXIST;
- goto cleanup_state;
- }
-
- if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
- CLR(st->state_flags, PFSTATE_NOSYNC);
- if (ISSET(st->state_flags, PFSTATE_ACK)) {
- pfsync_q_ins(st, PFSYNC_S_IACK);
- schednetisr(NETISR_PFSYNC);
- }
- }
- CLR(st->state_flags, PFSTATE_ACK);
-
- return (0);
-
- cleanup:
- if (skw == sks)
- sks = NULL;
- if (skw != NULL)
- pool_put(&pf_state_key_pl, skw);
- if (sks != NULL)
- pool_put(&pf_state_key_pl, sks);
-
- cleanup_state: /* pf_state_insert frees the state keys */
- if (st) {
- if (st->dst.scrub)
- pool_put(&pf_state_scrub_pl, st->dst.scrub);
- if (st->src.scrub)
- pool_put(&pf_state_scrub_pl, st->src.scrub);
- pool_put(&pf_state_pl, st);
- }
- return (error);
-}
-
int
pfsync_input(struct mbuf **mp, int *offp, int proto, int af)
{
continue;
}
- if (pfsync_state_import(sp, flags) == ENOMEM) {
+ if (pf_state_import(sp, flags) == ENOMEM) {
/* drop out, but process the rest of the actions */
break;
}
if (st == NULL) {
/* insert the update */
PF_LOCK();
- error = pfsync_state_import(sp, flags);
+ error = pf_state_import(sp, flags);
if (error)
pfsyncstat_inc(pfsyncs_badstate);
PF_UNLOCK();
}
if (sync < 2) {
- pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
+ pf_state_alloc_scrub_memory(&sp->dst, &st->dst);
pf_state_peer_ntoh(&sp->dst, &st->dst);
st->expire = getuptime();
st->timeout = sp->timeout;
pf_state_peer_ntoh(&up->dst, &st->dst);
}
if (sync < 2) {
- pfsync_alloc_scrub_memory(&up->dst, &st->dst);
+ pf_state_alloc_scrub_memory(&up->dst, &st->dst);
pf_state_peer_ntoh(&up->dst, &st->dst);
st->expire = getuptime();
st->timeout = up->timeout;
pfsync_send_plus(&r, sizeof(r));
}
+void
+pfsync_iack(struct pf_state *st)
+{
+ pfsync_q_ins(st, PFSYNC_S_IACK);
+ schednetisr(NETISR_PFSYNC);
+}
+
void
pfsync_q_ins(struct pf_state *st, int q)
{
-/* $OpenBSD: if_pfsync.h,v 1.57 2021/07/07 18:38:25 sashan Exp $ */
+/* $OpenBSD: if_pfsync.h,v 1.58 2022/11/06 18:05:05 dlg Exp $ */
/*
* Copyright (c) 2001 Michael Shalayeff
int pfsync_up(void);
int pfsync_state_in_use(struct pf_state *);
+
+void pfsync_iack(struct pf_state *);
#endif /* _KERNEL */
#endif /* _NET_IF_PFSYNC_H_ */
-/* $OpenBSD: pf.c,v 1.1141 2022/10/10 16:43:12 bket Exp $ */
+/* $OpenBSD: pf.c,v 1.1142 2022/11/06 18:05:05 dlg Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
sp->set_prio[1] = st->set_prio[1];
}
+int
+pf_state_alloc_scrub_memory(const struct pfsync_state_peer *s,
+ struct pf_state_peer *d)
+{
+ if (s->scrub.scrub_flag && d->scrub == NULL)
+ return (pf_normalize_tcp_alloc(d));
+
+ return (0);
+}
+
+int
+pf_state_import(const struct pfsync_state *sp, int flags)
+{
+ struct pf_state *st = NULL;
+ struct pf_state_key *skw = NULL, *sks = NULL;
+ struct pf_rule *r = NULL;
+ struct pfi_kif *kif;
+ int pool_flags;
+ int error = ENOMEM;
+ int n = 0;
+
+ if (sp->creatorid == 0) {
+ DPFPRINTF(LOG_NOTICE, "%s: invalid creator id: %08x", __func__,
+ ntohl(sp->creatorid));
+ return (EINVAL);
+ }
+
+ if ((kif = pfi_kif_get(sp->ifname, NULL)) == NULL) {
+ DPFPRINTF(LOG_NOTICE, "%s: unknown interface: %s", __func__,
+ sp->ifname);
+ if (flags & PFSYNC_SI_IOCTL)
+ return (EINVAL);
+ return (0); /* skip this state */
+ }
+
+ if (sp->af == 0)
+ return (0); /* skip this state */
+
+ /*
+ * If the ruleset checksums match or the state is coming from the ioctl,
+ * it's safe to associate the state with the rule of that number.
+ */
+ if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
+ (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) &&
+ ntohl(sp->rule) < pf_main_ruleset.rules.active.rcount) {
+ TAILQ_FOREACH(r, pf_main_ruleset.rules.active.ptr, entries)
+ if (ntohl(sp->rule) == n++)
+ break;
+ } else
+ r = &pf_default_rule;
+
+ if ((r->max_states && r->states_cur >= r->max_states))
+ goto cleanup;
+
+ if (flags & PFSYNC_SI_IOCTL)
+ pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
+ else
+ pool_flags = PR_NOWAIT | PR_LIMITFAIL | PR_ZERO;
+
+ if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
+ goto cleanup;
+
+ if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
+ goto cleanup;
+
+ if ((sp->key[PF_SK_WIRE].af &&
+ (sp->key[PF_SK_WIRE].af != sp->key[PF_SK_STACK].af)) ||
+ PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
+ &sp->key[PF_SK_STACK].addr[0], sp->af) ||
+ PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
+ &sp->key[PF_SK_STACK].addr[1], sp->af) ||
+ sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
+ sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1] ||
+ sp->key[PF_SK_WIRE].rdomain != sp->key[PF_SK_STACK].rdomain) {
+ if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
+ goto cleanup;
+ } else
+ sks = skw;
+
+ /* allocate memory for scrub info */
+ if (pf_state_alloc_scrub_memory(&sp->src, &st->src) ||
+ pf_state_alloc_scrub_memory(&sp->dst, &st->dst))
+ goto cleanup;
+
+ /* copy to state key(s) */
+ skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
+ skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
+ skw->port[0] = sp->key[PF_SK_WIRE].port[0];
+ skw->port[1] = sp->key[PF_SK_WIRE].port[1];
+ skw->rdomain = ntohs(sp->key[PF_SK_WIRE].rdomain);
+ PF_REF_INIT(skw->refcnt);
+ skw->proto = sp->proto;
+ if (!(skw->af = sp->key[PF_SK_WIRE].af))
+ skw->af = sp->af;
+ if (sks != skw) {
+ sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
+ sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
+ sks->port[0] = sp->key[PF_SK_STACK].port[0];
+ sks->port[1] = sp->key[PF_SK_STACK].port[1];
+ sks->rdomain = ntohs(sp->key[PF_SK_STACK].rdomain);
+ PF_REF_INIT(sks->refcnt);
+ if (!(sks->af = sp->key[PF_SK_STACK].af))
+ sks->af = sp->af;
+ if (sks->af != skw->af) {
+ switch (sp->proto) {
+ case IPPROTO_ICMP:
+ sks->proto = IPPROTO_ICMPV6;
+ break;
+ case IPPROTO_ICMPV6:
+ sks->proto = IPPROTO_ICMP;
+ break;
+ default:
+ sks->proto = sp->proto;
+ }
+ } else
+ sks->proto = sp->proto;
+
+ if (((sks->af != AF_INET) && (sks->af != AF_INET6)) ||
+ ((skw->af != AF_INET) && (skw->af != AF_INET6))) {
+ error = EINVAL;
+ goto cleanup;
+ }
+
+ } else if ((sks->af != AF_INET) && (sks->af != AF_INET6)) {
+ error = EINVAL;
+ goto cleanup;
+ }
+ st->rtableid[PF_SK_WIRE] = ntohl(sp->rtableid[PF_SK_WIRE]);
+ st->rtableid[PF_SK_STACK] = ntohl(sp->rtableid[PF_SK_STACK]);
+
+ /* copy to state */
+ st->rt_addr = sp->rt_addr;
+ st->rt = sp->rt;
+ st->creation = getuptime() - ntohl(sp->creation);
+ st->expire = getuptime();
+ if (ntohl(sp->expire)) {
+ u_int32_t timeout;
+
+ timeout = r->timeout[sp->timeout];
+ if (!timeout)
+ timeout = pf_default_rule.timeout[sp->timeout];
+
+ /* sp->expire may have been adaptively scaled by export. */
+ st->expire -= timeout - ntohl(sp->expire);
+ }
+
+ st->direction = sp->direction;
+ st->log = sp->log;
+ st->timeout = sp->timeout;
+ st->state_flags = ntohs(sp->state_flags);
+ st->max_mss = ntohs(sp->max_mss);
+ st->min_ttl = sp->min_ttl;
+ st->set_tos = sp->set_tos;
+ st->set_prio[0] = sp->set_prio[0];
+ st->set_prio[1] = sp->set_prio[1];
+
+ st->id = sp->id;
+ st->creatorid = sp->creatorid;
+ pf_state_peer_ntoh(&sp->src, &st->src);
+ pf_state_peer_ntoh(&sp->dst, &st->dst);
+
+ st->rule.ptr = r;
+ st->anchor.ptr = NULL;
+
+ st->pfsync_time = getuptime();
+ st->sync_state = PFSYNC_S_NONE;
+
+ refcnt_init(&st->refcnt);
+
+ /* XXX when we have anchors, use STATE_INC_COUNTERS */
+ r->states_cur++;
+ r->states_tot++;
+
+#if NPFSYNC > 0
+ if (!ISSET(flags, PFSYNC_SI_IOCTL))
+ SET(st->state_flags, PFSTATE_NOSYNC);
+#endif
+
+	/*
+	 * We just set the PFSTATE_NOSYNC bit, which prevents
+	 * pfsync_insert_state() from inserting this state into pfsync.
+	 */
+ if (pf_state_insert(kif, &skw, &sks, st) != 0) {
+ /* XXX when we have anchors, use STATE_DEC_COUNTERS */
+ r->states_cur--;
+ error = EEXIST;
+ goto cleanup_state;
+ }
+
+#if NPFSYNC > 0
+ if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
+ CLR(st->state_flags, PFSTATE_NOSYNC);
+ if (ISSET(st->state_flags, PFSTATE_ACK))
+ pfsync_iack(st);
+ }
+ CLR(st->state_flags, PFSTATE_ACK);
+#endif
+
+ return (0);
+
+ cleanup:
+ if (skw == sks)
+ sks = NULL;
+ if (skw != NULL)
+ pool_put(&pf_state_key_pl, skw);
+ if (sks != NULL)
+ pool_put(&pf_state_key_pl, sks);
+
+ cleanup_state: /* pf_state_insert frees the state keys */
+ if (st) {
+ if (st->dst.scrub)
+ pool_put(&pf_state_scrub_pl, st->dst.scrub);
+ if (st->src.scrub)
+ pool_put(&pf_state_scrub_pl, st->src.scrub);
+ pool_put(&pf_state_pl, st);
+ }
+ return (error);
+}
+
/* END state table stuff */
void
-/* $OpenBSD: pf_ioctl.c,v 1.386 2022/11/06 13:03:52 dlg Exp $ */
+/* $OpenBSD: pf_ioctl.c,v 1.387 2022/11/06 18:05:05 dlg Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
}
NET_LOCK();
PF_LOCK();
- error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
+ error = pf_state_import(sp, PFSYNC_SI_IOCTL);
PF_UNLOCK();
NET_UNLOCK();
break;
-/* $OpenBSD: pf_norm.c,v 1.225 2022/10/10 16:43:12 bket Exp $ */
+/* $OpenBSD: pf_norm.c,v 1.226 2022/11/06 18:05:05 dlg Exp $ */
/*
* Copyright 2001 Niels Provos <provos@citi.umich.edu>
}
#endif /* INET6 */
+int
+pf_normalize_tcp_alloc(struct pf_state_peer *src)
+{
+ src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
+ if (src->scrub == NULL)
+ return (ENOMEM);
+
+ return (0);
+}
+
int
pf_normalize_tcp(struct pf_pdesc *pd)
{
KASSERT(src->scrub == NULL);
- src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
- if (src->scrub == NULL)
+ if (pf_normalize_tcp_alloc(src) != 0)
return (1);
- memset(src->scrub, 0, sizeof(*src->scrub));
switch (pd->af) {
case AF_INET: {
-/* $OpenBSD: pfvar.h,v 1.511 2022/10/10 16:43:12 bket Exp $ */
+/* $OpenBSD: pfvar.h,v 1.512 2022/11/06 18:05:05 dlg Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
u_int, int *);
extern void pf_state_export(struct pfsync_state *,
struct pf_state *);
+int pf_state_import(const struct pfsync_state *,
+ int);
+int pf_state_alloc_scrub_memory(
+ const struct pfsync_state_peer *,
+ struct pf_state_peer *);
extern void pf_print_state(struct pf_state *);
extern void pf_print_flags(u_int8_t);
extern void pf_addrcpy(struct pf_addr *, struct pf_addr *,
int pf_normalize_tcp(struct pf_pdesc *);
void pf_normalize_tcp_cleanup(struct pf_state *);
int pf_normalize_tcp_init(struct pf_pdesc *, struct pf_state_peer *);
+int pf_normalize_tcp_alloc(struct pf_state_peer *);
int pf_normalize_tcp_stateful(struct pf_pdesc *, u_short *,
struct pf_state *, struct pf_state_peer *, struct pf_state_peer *,
int *);