From e004f9b1ce004809c6da01202abc8fcfcd9c22cc Mon Sep 17 00:00:00 2001
From: dlg
Date: Sun, 6 Nov 2022 18:05:05 +0000
Subject: [PATCH] move pfsync_state_import in if_pfsync.c to pf_state_import
 in pf.c

this is straightening the deck chairs. the state import and export code
are used by both the pf ioctls and pfsync, but the export code is in
pf.c and the import code is in if_pfsync. if pfsync was disabled then
the ioctl stuff wouldn't link. moving the import code to pf.c makes it
more symmetrical(?) and robust.

tweaks and ok from kn@ sashan@
---
 sys/net/if_pfsync.c | 239 +++-----------------------------------------
 sys/net/if_pfsync.h |   4 +-
 sys/net/pf.c        | 221 +++++++++++++++++++++++++++++++++++++++-
 sys/net/pf_ioctl.c  |   4 +-
 sys/net/pf_norm.c   |  16 ++-
 sys/net/pfvar.h     |   8 +-
 6 files changed, 256 insertions(+), 236 deletions(-)

diff --git a/sys/net/if_pfsync.c b/sys/net/if_pfsync.c
index 7174e50ecf4..8f36e816f8b 100644
--- a/sys/net/if_pfsync.c
+++ b/sys/net/if_pfsync.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: if_pfsync.c,v 1.306 2022/11/05 22:33:11 jan Exp $	*/
+/*	$OpenBSD: if_pfsync.c,v 1.307 2022/11/06 18:05:05 dlg Exp $	*/
 
 /*
  * Copyright (c) 2002 Michael Shalayeff
@@ -265,8 +265,6 @@ struct cpumem *pfsynccounters;
 void	pfsyncattach(int);
 int	pfsync_clone_create(struct if_clone *, int);
 int	pfsync_clone_destroy(struct ifnet *);
-int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
-	    struct pf_state_peer *);
 void	pfsync_update_net_tdb(struct pfsync_tdb *);
 int	pfsyncoutput(struct ifnet *, struct mbuf *,
 	    struct sockaddr *, struct rtentry *);
@@ -503,232 +501,12 @@ pfsync_ifdetach(void *arg)
 	sc->sc_sync_ifidx = 0;
 }
 
-int
-pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
-    struct pf_state_peer *d)
-{
-	if (s->scrub.scrub_flag && d->scrub == NULL) {
-		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
-		if (d->scrub == NULL)
-			return (ENOMEM);
-	}
-
-	return (0);
-}
-
 void
 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
 {
 	pf_state_export(sp, st);
 }
 
-int
-pfsync_state_import(struct pfsync_state *sp, int flags)
-{
-	struct pf_state *st = NULL;
-	struct pf_state_key *skw = NULL, *sks = NULL;
-	struct pf_rule *r = NULL;
-	struct pfi_kif *kif;
-	int pool_flags;
-	int error = ENOMEM;
-	int n = 0;
-
-	if (sp->creatorid == 0) {
-		DPFPRINTF(LOG_NOTICE, "pfsync_state_import: "
-		    "invalid creator id: %08x", ntohl(sp->creatorid));
-		return (EINVAL);
-	}
-
-	if ((kif = pfi_kif_get(sp->ifname, NULL)) == NULL) {
-		DPFPRINTF(LOG_NOTICE, "pfsync_state_import: "
-		    "unknown interface: %s", sp->ifname);
-		if (flags & PFSYNC_SI_IOCTL)
-			return (EINVAL);
-		return (0);	/* skip this state */
-	}
-
-	if (sp->af == 0)
-		return (0);	/* skip this state */
-
-	/*
-	 * If the ruleset checksums match or the state is coming from the ioctl,
-	 * it's safe to associate the state with the rule of that number.
-	 */
-	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
-	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
-	    pf_main_ruleset.rules.active.rcount) {
-		TAILQ_FOREACH(r, pf_main_ruleset.rules.active.ptr, entries)
-			if (ntohl(sp->rule) == n++)
-				break;
-	} else
-		r = &pf_default_rule;
-
-	if ((r->max_states && r->states_cur >= r->max_states))
-		goto cleanup;
-
-	if (flags & PFSYNC_SI_IOCTL)
-		pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
-	else
-		pool_flags = PR_NOWAIT | PR_LIMITFAIL | PR_ZERO;
-
-	if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
-		goto cleanup;
-
-	if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
-		goto cleanup;
-
-	if ((sp->key[PF_SK_WIRE].af &&
-	    (sp->key[PF_SK_WIRE].af != sp->key[PF_SK_STACK].af)) ||
-	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
-	    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
-	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
-	    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
-	    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
-	    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1] ||
-	    sp->key[PF_SK_WIRE].rdomain != sp->key[PF_SK_STACK].rdomain) {
-		if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
-			goto cleanup;
-	} else
-		sks = skw;
-
-	/* allocate memory for scrub info */
-	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
-	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
-		goto cleanup;
-
-	/* copy to state key(s) */
-	skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
-	skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
-	skw->port[0] = sp->key[PF_SK_WIRE].port[0];
-	skw->port[1] = sp->key[PF_SK_WIRE].port[1];
-	skw->rdomain = ntohs(sp->key[PF_SK_WIRE].rdomain);
-	PF_REF_INIT(skw->refcnt);
-	skw->proto = sp->proto;
-	if (!(skw->af = sp->key[PF_SK_WIRE].af))
-		skw->af = sp->af;
-	if (sks != skw) {
-		sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
-		sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
-		sks->port[0] = sp->key[PF_SK_STACK].port[0];
-		sks->port[1] = sp->key[PF_SK_STACK].port[1];
-		sks->rdomain = ntohs(sp->key[PF_SK_STACK].rdomain);
-		PF_REF_INIT(sks->refcnt);
-		if (!(sks->af = sp->key[PF_SK_STACK].af))
-			sks->af = sp->af;
-		if (sks->af != skw->af) {
-			switch (sp->proto) {
-			case IPPROTO_ICMP:
-				sks->proto = IPPROTO_ICMPV6;
-				break;
-			case IPPROTO_ICMPV6:
-				sks->proto = IPPROTO_ICMP;
-				break;
-			default:
-				sks->proto = sp->proto;
-			}
-		} else
-			sks->proto = sp->proto;
-
-		if (((sks->af != AF_INET) && (sks->af != AF_INET6)) ||
-		    ((skw->af != AF_INET) && (skw->af != AF_INET6))) {
-			error = EINVAL;
-			goto cleanup;
-		}
-
-	} else if ((sks->af != AF_INET) && (sks->af != AF_INET6)) {
-		error = EINVAL;
-		goto cleanup;
-	}
-	st->rtableid[PF_SK_WIRE] = ntohl(sp->rtableid[PF_SK_WIRE]);
-	st->rtableid[PF_SK_STACK] = ntohl(sp->rtableid[PF_SK_STACK]);
-
-	/* copy to state */
-	st->rt_addr = sp->rt_addr;
-	st->rt = sp->rt;
-	st->creation = getuptime() - ntohl(sp->creation);
-	st->expire = getuptime();
-	if (ntohl(sp->expire)) {
-		u_int32_t timeout;
-
-		timeout = r->timeout[sp->timeout];
-		if (!timeout)
-			timeout = pf_default_rule.timeout[sp->timeout];
-
-		/* sp->expire may have been adaptively scaled by export. */
-		st->expire -= timeout - ntohl(sp->expire);
-	}
-
-	st->direction = sp->direction;
-	st->log = sp->log;
-	st->timeout = sp->timeout;
-	st->state_flags = ntohs(sp->state_flags);
-	st->max_mss = ntohs(sp->max_mss);
-	st->min_ttl = sp->min_ttl;
-	st->set_tos = sp->set_tos;
-	st->set_prio[0] = sp->set_prio[0];
-	st->set_prio[1] = sp->set_prio[1];
-
-	st->id = sp->id;
-	st->creatorid = sp->creatorid;
-	pf_state_peer_ntoh(&sp->src, &st->src);
-	pf_state_peer_ntoh(&sp->dst, &st->dst);
-
-	st->rule.ptr = r;
-	st->anchor.ptr = NULL;
-
-	st->pfsync_time = getuptime();
-	st->sync_state = PFSYNC_S_NONE;
-
-	refcnt_init(&st->refcnt);
-
-	/* XXX when we have anchors, use STATE_INC_COUNTERS */
-	r->states_cur++;
-	r->states_tot++;
-
-	if (!ISSET(flags, PFSYNC_SI_IOCTL))
-		SET(st->state_flags, PFSTATE_NOSYNC);
-
-	/*
-	 * We just set PFSTATE_NOSYNC bit, which prevents
-	 * pfsync_insert_state() to insert state to pfsync.
-	 */
-	if (pf_state_insert(kif, &skw, &sks, st) != 0) {
-		/* XXX when we have anchors, use STATE_DEC_COUNTERS */
-		r->states_cur--;
-		error = EEXIST;
-		goto cleanup_state;
-	}
-
-	if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
-		CLR(st->state_flags, PFSTATE_NOSYNC);
-		if (ISSET(st->state_flags, PFSTATE_ACK)) {
-			pfsync_q_ins(st, PFSYNC_S_IACK);
-			schednetisr(NETISR_PFSYNC);
-		}
-	}
-	CLR(st->state_flags, PFSTATE_ACK);
-
-	return (0);
-
- cleanup:
-	if (skw == sks)
-		sks = NULL;
-	if (skw != NULL)
-		pool_put(&pf_state_key_pl, skw);
-	if (sks != NULL)
-		pool_put(&pf_state_key_pl, sks);
-
- cleanup_state:	/* pf_state_insert frees the state keys */
-	if (st) {
-		if (st->dst.scrub)
-			pool_put(&pf_state_scrub_pl, st->dst.scrub);
-		if (st->src.scrub)
-			pool_put(&pf_state_scrub_pl, st->src.scrub);
-		pool_put(&pf_state_pl, st);
-	}
-	return (error);
-}
-
 int
 pfsync_input(struct mbuf **mp, int *offp, int proto, int af)
 {
@@ -892,7 +670,7 @@ pfsync_in_ins(caddr_t buf, int len, int count, int flags)
 			continue;
 		}
 
-		if (pfsync_state_import(sp, flags) == ENOMEM) {
+		if (pf_state_import(sp, flags) == ENOMEM) {
 			/* drop out, but process the rest of the actions */
 			break;
 		}
@@ -996,7 +774,7 @@ pfsync_in_upd(caddr_t buf, int len, int count, int flags)
 		if (st == NULL) {
 			/* insert the update */
 			PF_LOCK();
-			error = pfsync_state_import(sp, flags);
+			error = pf_state_import(sp, flags);
 			if (error)
 				pfsyncstat_inc(pfsyncs_badstate);
 			PF_UNLOCK();
@@ -1027,7 +805,7 @@ pfsync_in_upd(caddr_t buf, int len, int count, int flags)
 		}
 
 		if (sync < 2) {
-			pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
+			pf_state_alloc_scrub_memory(&sp->dst, &st->dst);
 			pf_state_peer_ntoh(&sp->dst, &st->dst);
 			st->expire = getuptime();
 			st->timeout = sp->timeout;
@@ -1106,7 +884,7 @@ pfsync_in_upd_c(caddr_t buf, int len, int count, int flags)
 			pf_state_peer_ntoh(&up->dst, &st->dst);
 		}
 		if (sync < 2) {
-			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
+			pf_state_alloc_scrub_memory(&up->dst, &st->dst);
 			pf_state_peer_ntoh(&up->dst, &st->dst);
 			st->expire = getuptime();
 			st->timeout = up->timeout;
@@ -2426,6 +2204,13 @@ pfsync_clear_states(u_int32_t creatorid, const char *ifname)
 	pfsync_send_plus(&r, sizeof(r));
 }
 
+void
+pfsync_iack(struct pf_state *st)
+{
+	pfsync_q_ins(st, PFSYNC_S_IACK);
+	schednetisr(NETISR_PFSYNC);
+}
+
 void
 pfsync_q_ins(struct pf_state *st, int q)
 {
diff --git a/sys/net/if_pfsync.h b/sys/net/if_pfsync.h
index bee6c77f228..5447b829d74 100644
--- a/sys/net/if_pfsync.h
+++ b/sys/net/if_pfsync.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: if_pfsync.h,v 1.57 2021/07/07 18:38:25 sashan Exp $	*/
+/*	$OpenBSD: if_pfsync.h,v 1.58 2022/11/06 18:05:05 dlg Exp $	*/
 
 /*
  * Copyright (c) 2001 Michael Shalayeff
@@ -343,6 +343,8 @@ void	pfsync_undefer(struct pfsync_deferral *, int);
 
 int	pfsync_up(void);
 int	pfsync_state_in_use(struct pf_state *);
+
+void	pfsync_iack(struct pf_state *);
 #endif /* _KERNEL */
 
 #endif /* _NET_IF_PFSYNC_H_ */
diff --git a/sys/net/pf.c b/sys/net/pf.c
index 4c70b08571e..62108ab25c9 100644
--- a/sys/net/pf.c
+++ b/sys/net/pf.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pf.c,v 1.1141 2022/10/10 16:43:12 bket Exp $	*/
+/*	$OpenBSD: pf.c,v 1.1142 2022/11/06 18:05:05 dlg Exp $	*/
 
 /*
  * Copyright (c) 2001 Daniel Hartmeier
@@ -1261,6 +1261,225 @@ pf_state_export(struct pfsync_state *sp, struct pf_state *st)
 	sp->set_prio[1] = st->set_prio[1];
 }
 
+int
+pf_state_alloc_scrub_memory(const struct pfsync_state_peer *s,
+    struct pf_state_peer *d)
+{
+	if (s->scrub.scrub_flag && d->scrub == NULL)
+		return (pf_normalize_tcp_alloc(d));
+
+	return (0);
+}
+
+int
+pf_state_import(const struct pfsync_state *sp, int flags)
+{
+	struct pf_state *st = NULL;
+	struct pf_state_key *skw = NULL, *sks = NULL;
+	struct pf_rule *r = NULL;
+	struct pfi_kif *kif;
+	int pool_flags;
+	int error = ENOMEM;
+	int n = 0;
+
+	if (sp->creatorid == 0) {
+		DPFPRINTF(LOG_NOTICE, "%s: invalid creator id: %08x", __func__,
+		    ntohl(sp->creatorid));
+		return (EINVAL);
+	}
+
+	if ((kif = pfi_kif_get(sp->ifname, NULL)) == NULL) {
+		DPFPRINTF(LOG_NOTICE, "%s: unknown interface: %s", __func__,
+		    sp->ifname);
+		if (flags & PFSYNC_SI_IOCTL)
+			return (EINVAL);
+		return (0);	/* skip this state */
+	}
+
+	if (sp->af == 0)
+		return (0);	/* skip this state */
+
+	/*
+	 * If the ruleset checksums match or the state is coming from the ioctl,
+	 * it's safe to associate the state with the rule of that number.
+	 */
+	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
+	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) &&
+	    ntohl(sp->rule) < pf_main_ruleset.rules.active.rcount) {
+		TAILQ_FOREACH(r, pf_main_ruleset.rules.active.ptr, entries)
+			if (ntohl(sp->rule) == n++)
+				break;
+	} else
+		r = &pf_default_rule;
+
+	if ((r->max_states && r->states_cur >= r->max_states))
+		goto cleanup;
+
+	if (flags & PFSYNC_SI_IOCTL)
+		pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
+	else
+		pool_flags = PR_NOWAIT | PR_LIMITFAIL | PR_ZERO;
+
+	if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
+		goto cleanup;
+
+	if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
+		goto cleanup;
+
+	if ((sp->key[PF_SK_WIRE].af &&
+	    (sp->key[PF_SK_WIRE].af != sp->key[PF_SK_STACK].af)) ||
+	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
+	    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
+	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
+	    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
+	    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
+	    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1] ||
+	    sp->key[PF_SK_WIRE].rdomain != sp->key[PF_SK_STACK].rdomain) {
+		if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
+			goto cleanup;
+	} else
+		sks = skw;
+
+	/* allocate memory for scrub info */
+	if (pf_state_alloc_scrub_memory(&sp->src, &st->src) ||
+	    pf_state_alloc_scrub_memory(&sp->dst, &st->dst))
+		goto cleanup;
+
+	/* copy to state key(s) */
+	skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
+	skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
+	skw->port[0] = sp->key[PF_SK_WIRE].port[0];
+	skw->port[1] = sp->key[PF_SK_WIRE].port[1];
+	skw->rdomain = ntohs(sp->key[PF_SK_WIRE].rdomain);
+	PF_REF_INIT(skw->refcnt);
+	skw->proto = sp->proto;
+	if (!(skw->af = sp->key[PF_SK_WIRE].af))
+		skw->af = sp->af;
+	if (sks != skw) {
+		sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
+		sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
+		sks->port[0] = sp->key[PF_SK_STACK].port[0];
+		sks->port[1] = sp->key[PF_SK_STACK].port[1];
+		sks->rdomain = ntohs(sp->key[PF_SK_STACK].rdomain);
+		PF_REF_INIT(sks->refcnt);
+		if (!(sks->af = sp->key[PF_SK_STACK].af))
+			sks->af = sp->af;
+		if (sks->af != skw->af) {
+			switch (sp->proto) {
+			case IPPROTO_ICMP:
+				sks->proto = IPPROTO_ICMPV6;
+				break;
+			case IPPROTO_ICMPV6:
+				sks->proto = IPPROTO_ICMP;
+				break;
+			default:
+				sks->proto = sp->proto;
+			}
+		} else
+			sks->proto = sp->proto;
+
+		if (((sks->af != AF_INET) && (sks->af != AF_INET6)) ||
+		    ((skw->af != AF_INET) && (skw->af != AF_INET6))) {
+			error = EINVAL;
+			goto cleanup;
+		}
+
+	} else if ((sks->af != AF_INET) && (sks->af != AF_INET6)) {
+		error = EINVAL;
+		goto cleanup;
+	}
+	st->rtableid[PF_SK_WIRE] = ntohl(sp->rtableid[PF_SK_WIRE]);
+	st->rtableid[PF_SK_STACK] = ntohl(sp->rtableid[PF_SK_STACK]);
+
+	/* copy to state */
+	st->rt_addr = sp->rt_addr;
+	st->rt = sp->rt;
+	st->creation = getuptime() - ntohl(sp->creation);
+	st->expire = getuptime();
+	if (ntohl(sp->expire)) {
+		u_int32_t timeout;
+
+		timeout = r->timeout[sp->timeout];
+		if (!timeout)
+			timeout = pf_default_rule.timeout[sp->timeout];
+
+		/* sp->expire may have been adaptively scaled by export. */
+		st->expire -= timeout - ntohl(sp->expire);
+	}
+
+	st->direction = sp->direction;
+	st->log = sp->log;
+	st->timeout = sp->timeout;
+	st->state_flags = ntohs(sp->state_flags);
+	st->max_mss = ntohs(sp->max_mss);
+	st->min_ttl = sp->min_ttl;
+	st->set_tos = sp->set_tos;
+	st->set_prio[0] = sp->set_prio[0];
+	st->set_prio[1] = sp->set_prio[1];
+
+	st->id = sp->id;
+	st->creatorid = sp->creatorid;
+	pf_state_peer_ntoh(&sp->src, &st->src);
+	pf_state_peer_ntoh(&sp->dst, &st->dst);
+
+	st->rule.ptr = r;
+	st->anchor.ptr = NULL;
+
+	st->pfsync_time = getuptime();
+	st->sync_state = PFSYNC_S_NONE;
+
+	refcnt_init(&st->refcnt);
+
+	/* XXX when we have anchors, use STATE_INC_COUNTERS */
+	r->states_cur++;
+	r->states_tot++;
+
+#if NPFSYNC > 0
+	if (!ISSET(flags, PFSYNC_SI_IOCTL))
+		SET(st->state_flags, PFSTATE_NOSYNC);
+#endif
+
+	/*
+	 * We just set PFSTATE_NOSYNC bit, which prevents
+	 * pfsync_insert_state() to insert state to pfsync.
+	 */
+	if (pf_state_insert(kif, &skw, &sks, st) != 0) {
+		/* XXX when we have anchors, use STATE_DEC_COUNTERS */
+		r->states_cur--;
+		error = EEXIST;
+		goto cleanup_state;
+	}
+
+#if NPFSYNC > 0
+	if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
+		CLR(st->state_flags, PFSTATE_NOSYNC);
+		if (ISSET(st->state_flags, PFSTATE_ACK))
+			pfsync_iack(st);
+	}
+	CLR(st->state_flags, PFSTATE_ACK);
+#endif
+
+	return (0);
+
+ cleanup:
+	if (skw == sks)
+		sks = NULL;
+	if (skw != NULL)
+		pool_put(&pf_state_key_pl, skw);
+	if (sks != NULL)
+		pool_put(&pf_state_key_pl, sks);
+
+ cleanup_state:	/* pf_state_insert frees the state keys */
+	if (st) {
+		if (st->dst.scrub)
+			pool_put(&pf_state_scrub_pl, st->dst.scrub);
+		if (st->src.scrub)
+			pool_put(&pf_state_scrub_pl, st->src.scrub);
+		pool_put(&pf_state_pl, st);
+	}
+	return (error);
+}
+
 /* END state table stuff */
 
 void
diff --git a/sys/net/pf_ioctl.c b/sys/net/pf_ioctl.c
index ef4f18e730d..60b11cb0630 100644
--- a/sys/net/pf_ioctl.c
+++ b/sys/net/pf_ioctl.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pf_ioctl.c,v 1.386 2022/11/06 13:03:52 dlg Exp $	*/
+/*	$OpenBSD: pf_ioctl.c,v 1.387 2022/11/06 18:05:05 dlg Exp $	*/
 
 /*
  * Copyright (c) 2001 Daniel Hartmeier
@@ -1882,7 +1882,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
 		}
 		NET_LOCK();
 		PF_LOCK();
-		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
+		error = pf_state_import(sp, PFSYNC_SI_IOCTL);
 		PF_UNLOCK();
 		NET_UNLOCK();
 		break;
diff --git a/sys/net/pf_norm.c b/sys/net/pf_norm.c
index f04930d34c9..db2f5755bcd 100644
--- a/sys/net/pf_norm.c
+++ b/sys/net/pf_norm.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pf_norm.c,v 1.225 2022/10/10 16:43:12 bket Exp $	*/
+/*	$OpenBSD: pf_norm.c,v 1.226 2022/11/06 18:05:05 dlg Exp $	*/
 
 /*
  * Copyright 2001 Niels Provos
@@ -1098,6 +1098,16 @@ no_fragment:
 }
 #endif /* INET6 */
 
+int
+pf_normalize_tcp_alloc(struct pf_state_peer *src)
+{
+	src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
+	if (src->scrub == NULL)
+		return (ENOMEM);
+
+	return (0);
+}
+
 int
 pf_normalize_tcp(struct pf_pdesc *pd)
 {
@@ -1165,10 +1175,8 @@ pf_normalize_tcp_init(struct pf_pdesc *pd, struct pf_state_peer *src)
 
 	KASSERT(src->scrub == NULL);
 
-	src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
-	if (src->scrub == NULL)
+	if (pf_normalize_tcp_alloc(src) != 0)
 		return (1);
-	memset(src->scrub, 0, sizeof(*src->scrub));
 
 	switch (pd->af) {
 	case AF_INET: {
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index 8339863a94a..8d972897bcb 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pfvar.h,v 1.511 2022/10/10 16:43:12 bket Exp $	*/
+/*	$OpenBSD: pfvar.h,v 1.512 2022/11/06 18:05:05 dlg Exp $	*/
 
 /*
  * Copyright (c) 2001 Daniel Hartmeier
@@ -1741,6 +1741,11 @@ extern struct pf_state	*pf_find_state_all(struct pf_state_key_cmp *,
 			    u_int, int *);
 extern void		 pf_state_export(struct pfsync_state *,
 			    struct pf_state *);
+int			 pf_state_import(const struct pfsync_state *,
+			    int);
+int			 pf_state_alloc_scrub_memory(
+			    const struct pfsync_state_peer *,
+			    struct pf_state_peer *);
 extern void		 pf_print_state(struct pf_state *);
 extern void		 pf_print_flags(u_int8_t);
 extern void		 pf_addrcpy(struct pf_addr *, struct pf_addr *,
@@ -1791,6 +1796,7 @@ int	pf_normalize_ip6(struct pf_pdesc *, u_short *);
 int	pf_normalize_tcp(struct pf_pdesc *);
 void	pf_normalize_tcp_cleanup(struct pf_state *);
 int	pf_normalize_tcp_init(struct pf_pdesc *, struct pf_state_peer *);
+int	pf_normalize_tcp_alloc(struct pf_state_peer *);
 int	pf_normalize_tcp_stateful(struct pf_pdesc *, u_short *, struct pf_state *,
 	    struct pf_state_peer *, struct pf_state_peer *, int *);
-- 
2.20.1
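
For readers following the refactor: the reason the move fixes the link problem is that pf.c always builds, while calls into pfsync(4) now only exist when the interface is compiled in, guarded by the #if NPFSYNC > 0 blocks above and funnelled through the new pfsync_iack() helper. Below is a minimal, self-contained sketch of that conditional-compilation pattern, not the kernel code itself; the names example_state, example_import(), example_pfsync_iack() and the EX_* flags are hypothetical stand-ins for the real pf/pfsync structures, and NPFSYNC is defined locally here rather than coming from the config-generated "pfsync.h" header.

/*
 * Sketch only: shows how core code can keep building and linking
 * whether or not the optional pfsync layer is compiled in.
 */
#include <stdio.h>

#ifndef NPFSYNC
#define NPFSYNC 1		/* pretend pfsync(4) is compiled in */
#endif

#define EX_NOSYNC	0x01	/* do not announce this state to peers */
#define EX_ACK		0x02	/* peer asked for an explicit insert ack */

struct example_state {
	unsigned int	flags;
};

#if NPFSYNC > 0
/* this helper only exists when the pfsync layer is built */
static void
example_pfsync_iack(struct example_state *st)
{
	printf("queueing insert-ack for state %p\n", (void *)st);
}
#endif

/* core import path: must compile with or without pfsync */
static int
example_import(struct example_state *st, int from_ioctl)
{
#if NPFSYNC > 0
	/* a state learned from a peer must not be re-announced */
	if (!from_ioctl)
		st->flags |= EX_NOSYNC;
#endif

	/* ... insert the state into the core tables here ... */

#if NPFSYNC > 0
	if (!from_ioctl) {
		st->flags &= ~EX_NOSYNC;
		if (st->flags & EX_ACK)
			example_pfsync_iack(st);
	}
	st->flags &= ~EX_ACK;
#endif
	return (0);
}

int
main(void)
{
	struct example_state st = { .flags = EX_ACK };

	return (example_import(&st, 0));
}

When NPFSYNC is 0 the preprocessor removes the pfsync calls entirely, so the object file has no undefined pfsync symbols; that is the property the import path gains by living in pf.c instead of if_pfsync.c.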