-/* $OpenBSD: rde.c,v 1.398 2018/07/22 06:03:17 claudio Exp $ */
+/* $OpenBSD: rde.c,v 1.399 2018/07/22 16:59:08 claudio Exp $ */
/*
* Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
imsg->hdr.len - IMSG_HEADER_SIZE - 4 - withdrawn_len - attrpath_len;
bzero(&mpa, sizeof(mpa));
- rde_filterstate_prep(&state, NULL);
+ rde_filterstate_prep(&state, NULL, NULL);
if (attrpath_len != 0) { /* 0 = no NLRI information in this message */
/* parse path attributes */
while (len > 0) {
goto done;
}
- /*
- * this works because asp is not linked.
- * But first unlock the previously locked nexthop.
- */
- if (state.aspath.nexthop) {
- (void)nexthop_put(state.aspath.nexthop);
- state.aspath.nexthop = NULL;
- }
+ /* unlock the previously locked nexthop, it is no longer used */
+ (void)nexthop_put(state.nexthop);
+ state.nexthop = NULL;
if ((pos = rde_get_mp_nexthop(mpp, mplen, aid, &state)) == -1) {
log_peer_warnx(&peer->conf, "bad nlri nexthop");
rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
for (i = RIB_LOC_START; i < rib_size; i++) {
if (*ribs[i].name == '\0')
break;
- rde_filterstate_prep(&state, &in->aspath);
+ rde_filterstate_prep(&state, &in->aspath, in->nexthop);
/* input filter */
action = rde_filter(ribs[i].in_rules, peer, p, &state);
if (action == ACTION_ALLOW) {
rde_update_log("update", i, peer,
- &state.aspath.nexthop->exit_nexthop, prefix,
+ &state.nexthop->exit_nexthop, prefix,
prefixlen);
path_update(&ribs[i].rib, peer, &state, prefix,
prefixlen, 0);
op, len);
return (-1);
}
- a->nexthop = nexthop_get(&nexthop);
+ nexthop_put(state->nexthop); /* just to be sure */
+ state->nexthop = nexthop_get(&nexthop);
break;
case ATTR_MED:
if (attr_len != 4)
return (-1);
}
- state->aspath.nexthop = nexthop_get(&nexthop);
+ nexthop_put(state->nexthop); /* just to be sure */
+ state->nexthop = nexthop_get(&nexthop);
/* ignore reserved (old SNPA) field as per RFC4760 */
totlen += nhlen + 1;
if (up_test_update(peer, p) != 1)
return;
- rde_filterstate_prep(&state, prefix_aspath(p));
+ rde_filterstate_prep(&state, prefix_aspath(p), prefix_nexthop(p));
a = rde_filter(out_rules, peer, p, &state);
if (a == ACTION_ALLOW)
asp = prefix_aspath(p);
peer = asp->peer;
- rde_filterstate_prep(&state, asp);
+ rde_filterstate_prep(&state, asp, prefix_nexthop(p));
action = rde_filter(rib->in_rules, peer, p, &state);
if (action == ACTION_ALLOW) {
if (up_test_update(peer, p) != 1)
return;
- rde_filterstate_prep(&ostate, prefix_aspath(p));
- rde_filterstate_prep(&nstate, prefix_aspath(p));
+ rde_filterstate_prep(&ostate, prefix_aspath(p), prefix_nexthop(p));
+ rde_filterstate_prep(&nstate, prefix_aspath(p), prefix_nexthop(p));
oa = rde_filter(out_rules_tmp, peer, p, &ostate);
na = rde_filter(out_rules, peer, p, &nstate);
/* nothing todo */
if (oa == ACTION_DENY && na == ACTION_ALLOW) {
/* send update */
- up_generate(peer, &nstate.aspath, &addr, pt->prefixlen);
+ up_generate(peer, &nstate, &addr, pt->prefixlen);
} else if (oa == ACTION_ALLOW && na == ACTION_DENY) {
/* send withdraw */
up_generate(peer, NULL, &addr, pt->prefixlen);
} else if (oa == ACTION_ALLOW && na == ACTION_ALLOW) {
+ /* XXX update nexthop for now, ugly but will go away */
+ nexthop_put(nstate.aspath.nexthop);
+ nstate.aspath.nexthop = nexthop_ref(nstate.nexthop);
+ nstate.aspath.flags = (nstate.aspath.flags & ~F_NEXTHOP_MASK) |
+ (nstate.nhflags & F_NEXTHOP_MASK);
+ nexthop_put(ostate.aspath.nexthop);
+ ostate.aspath.nexthop = nexthop_ref(ostate.nexthop);
+ ostate.aspath.flags = (ostate.aspath.flags & ~F_NEXTHOP_MASK) |
+ (ostate.nhflags & F_NEXTHOP_MASK);
/* send update if path attributes changed */
if (path_compare(&nstate.aspath, &ostate.aspath) != 0)
- up_generate(peer, &nstate.aspath, &addr, pt->prefixlen);
+ up_generate(peer, &nstate, &addr, pt->prefixlen);
}
rde_filterstate_clean(&ostate);
if (up_test_update(peer, p) != 1)
return;
- rde_filterstate_prep(&ostate, prefix_aspath(p));
+ rde_filterstate_prep(&ostate, prefix_aspath(p), prefix_nexthop(p));
if (rde_filter(out_rules_tmp, peer, p, &ostate) != ACTION_DENY) {
/* send withdraw */
up_generate(peer, NULL, &addr, pt->prefixlen);
}
if (!flagstatic)
asp->flags |= F_ANN_DYNAMIC;
- rde_filterstate_prep(&state, asp);
+ rde_filterstate_prep(&state, asp, NULL); /* nexthop is not set */
rde_apply_set(&nc->attrset, &state, nc->prefix.aid, peerself, peerself);
if (vpnset)
rde_apply_set(vpnset, &state, nc->prefix.aid, peerself,
-/* $OpenBSD: rde_filter.c,v 1.96 2018/07/16 09:09:20 claudio Exp $ */
+/* $OpenBSD: rde_filter.c,v 1.97 2018/07/22 16:59:08 claudio Exp $ */
/*
* Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org>
#include "log.h"
int rde_filter_match(struct filter_rule *, struct rde_peer *,
- struct rde_aspath *, struct prefix *);
+ struct filterstate *, struct prefix *);
int rde_prefix_match(struct filter_prefix *, struct prefix *);
int filterset_equal(struct filter_set_head *, struct filter_set_head *);
case ACTION_SET_NEXTHOP_NOMODIFY:
case ACTION_SET_NEXTHOP_SELF:
nexthop_modify(set->action.nh, set->type, aid,
- &state->aspath.nexthop, &state->aspath.flags);
+ &state->nexthop, &state->nhflags);
break;
case ACTION_SET_COMMUNITY:
switch (set->action.community.as) {
int
rde_filter_match(struct filter_rule *f, struct rde_peer *peer,
- struct rde_aspath *asp, struct prefix *p)
+ struct filterstate *state, struct prefix *p)
{
u_int32_t pas;
int cas, type;
int64_t las, ld1, ld2;
- struct prefixset_item *psi;
+ struct prefixset_item *psi;
+ struct rde_aspath *asp = NULL;
+
+ if (state != NULL)
+ asp = &state->aspath;
if (asp != NULL && f->match.as.type != AS_NONE) {
if (f->match.as.flags & AS_FLAG_NEIGHBORAS)
return (0);
}
- if (asp != NULL && f->peer.ebgp && !peer->conf.ebgp)
- return (0);
- if (asp != NULL && f->peer.ibgp && peer->conf.ebgp)
- return (0);
+ if (f->peer.ebgp && !peer->conf.ebgp)
+ return (0);
+ if (f->peer.ibgp && peer->conf.ebgp)
+ return (0);
if (asp != NULL && f->match.aslen.type != ASLEN_NONE)
if (aspath_lenmatch(asp->aspath, f->match.aslen.type,
return (0);
}
- if (f->match.nexthop.flags != 0) {
+ if (state != NULL && f->match.nexthop.flags != 0) {
struct bgpd_addr *nexthop, *cmpaddr;
- if (asp != NULL && asp->nexthop == NULL)
+ if (state->nexthop == NULL)
/* no nexthop, skip */
return (0);
- nexthop = &asp->nexthop->exit_nexthop;
+ nexthop = &state->nexthop->exit_nexthop;
if (f->match.nexthop.flags == FILTER_NEXTHOP_ADDR)
cmpaddr = &f->match.nexthop.addr;
else
}
void
-rde_filterstate_prep(struct filterstate *state, struct rde_aspath *asp)
+rde_filterstate_prep(struct filterstate *state, struct rde_aspath *asp,
+    struct nexthop *nh)
{
+ /*
+ * Initialize *state for filtering: zero it, copy the aspath from
+ * asp (if given) and take a reference on nh.  The caller owns the
+ * result and must release it with rde_filterstate_clean().
+ */
memset(state, 0, sizeof(*state));
path_prep(&state->aspath);
if (asp)
path_copy(&state->aspath, asp);
+ state->nexthop = nexthop_ref(nh);
+ /* XXX the flag handling needs improvement */
+ if (asp)
+ state->nhflags |= asp->flags & F_NEXTHOP_MASK;
}
void
rde_filterstate_clean(struct filterstate *state)
{
+ /* release the aspath copy and drop the nexthop reference */
path_clean(&state->aspath);
+ nexthop_put(state->nexthop);
+ state->nexthop = NULL;
}
void
struct prefix *p, struct filterstate *state)
{
struct filter_rule *f;
- struct rde_aspath *asp = prefix_aspath(p);
enum filter_actions action = ACTION_DENY; /* default deny */
- if (asp->flags & F_ATTR_PARSE_ERR)
+ if (state && state->aspath.flags & F_ATTR_PARSE_ERR)
/*
* don't try to filter bad updates just deny them
* so they act as implicit withdraws
f->peer.peerid != peer->conf.id),
f->skip[RDE_FILTER_SKIP_PEERID].ptr);
- if (rde_filter_match(f, peer, asp, p)) {
+ if (rde_filter_match(f, peer, state, p)) {
if (state != NULL) {
rde_apply_set(&f->set, state,
p->re->prefix->aid, prefix_peer(p), peer);
-/* $OpenBSD: rde_update.c,v 1.94 2018/07/09 14:08:48 claudio Exp $ */
+/* $OpenBSD: rde_update.c,v 1.95 2018/07/22 16:59:08 claudio Exp $ */
/*
* Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org>
#include "rde.h"
#include "log.h"
-in_addr_t up_get_nexthop(struct rde_peer *, struct rde_aspath *);
+in_addr_t up_get_nexthop(struct rde_peer *, struct filterstate *);
int up_generate_mp_reach(struct rde_peer *, struct update_attr *,
- struct rde_aspath *, u_int8_t);
+ struct filterstate *, u_int8_t);
int up_generate_attr(struct rde_peer *, struct update_attr *,
- struct rde_aspath *, u_int8_t);
+ struct filterstate *, u_int8_t);
/* update stuff. */
struct update_prefix {
}
int
-up_generate(struct rde_peer *peer, struct rde_aspath *asp,
+up_generate(struct rde_peer *peer, struct filterstate *state,
struct bgpd_addr *addr, u_int8_t prefixlen)
{
struct update_attr *ua = NULL;
struct update_prefix *up;
SIPHASH_CTX ctx;
- if (asp) {
+ if (state) {
ua = calloc(1, sizeof(struct update_attr));
if (ua == NULL)
fatal("up_generate");
- if (up_generate_attr(peer, ua, asp, addr->aid) == -1) {
+ if (up_generate_attr(peer, ua, state, addr->aid) == -1) {
log_warnx("generation of bgp path attributes failed");
free(ua);
return (-1);
return;
}
- rde_filterstate_prep(&state, prefix_aspath(new));
+ rde_filterstate_prep(&state, prefix_aspath(new),
+ prefix_nexthop(new));
if (rde_filter(rules, peer, new, &state) == ACTION_DENY) {
rde_filterstate_clean(&state);
goto withdraw;
}
pt_getaddr(new->re->prefix, &addr);
- up_generate(peer, &state.aspath, &addr,
+ up_generate(peer, &state, &addr,
new->re->prefix->prefixlen);
rde_filterstate_clean(&state);
p.flags = 0;
/* filter as usual */
- rde_filterstate_prep(&state, asp);
+ rde_filterstate_prep(&state, asp, NULL);
if (rde_filter(rules, peer, &p, &state) == ACTION_DENY) {
rde_filterstate_clean(&state);
return;
}
- up_generate(peer, &state.aspath, &addr, 0);
+ up_generate(peer, &state, &addr, 0);
/* no longer needed */
rde_filterstate_clean(&state);
/* only for IPv4 */
in_addr_t
-up_get_nexthop(struct rde_peer *peer, struct rde_aspath *a)
+up_get_nexthop(struct rde_peer *peer, struct filterstate *state)
{
in_addr_t mask;
/* nexthop, already network byte order */
- if (a->flags & F_NEXTHOP_NOMODIFY) {
+ if (state->nhflags & F_NEXTHOP_NOMODIFY) {
/* no modify flag set */
- if (a->nexthop == NULL)
+ if (state->nexthop == NULL)
return (peer->local_v4_addr.v4.s_addr);
else
- return (a->nexthop->exit_nexthop.v4.s_addr);
- } else if (a->flags & F_NEXTHOP_SELF)
+ return (state->nexthop->exit_nexthop.v4.s_addr);
+ } else if (state->nhflags & F_NEXTHOP_SELF)
return (peer->local_v4_addr.v4.s_addr);
else if (!peer->conf.ebgp) {
/*
* If directly connected use peer->local_v4_addr
* this is only true for announced networks.
*/
- if (a->nexthop == NULL)
+ if (state->nexthop == NULL)
return (peer->local_v4_addr.v4.s_addr);
- else if (a->nexthop->exit_nexthop.v4.s_addr ==
+ else if (state->nexthop->exit_nexthop.v4.s_addr ==
peer->remote_addr.v4.s_addr)
/*
* per RFC: if remote peer address is equal to
*/
return (peer->local_v4_addr.v4.s_addr);
else
- return (a->nexthop->exit_nexthop.v4.s_addr);
+ return (state->nexthop->exit_nexthop.v4.s_addr);
} else if (peer->conf.distance == 1) {
/* ebgp directly connected */
- if (a->nexthop != NULL &&
- a->nexthop->flags & NEXTHOP_CONNECTED) {
+ if (state->nexthop != NULL &&
+ state->nexthop->flags & NEXTHOP_CONNECTED) {
mask = htonl(
- prefixlen2mask(a->nexthop->nexthop_netlen));
+ prefixlen2mask(state->nexthop->nexthop_netlen));
if ((peer->remote_addr.v4.s_addr & mask) ==
- (a->nexthop->nexthop_net.v4.s_addr & mask))
+ (state->nexthop->nexthop_net.v4.s_addr & mask))
/* nexthop and peer are in the same net */
- return (a->nexthop->exit_nexthop.v4.s_addr);
+ return (state->nexthop->exit_nexthop.v4.s_addr);
else
return (peer->local_v4_addr.v4.s_addr);
} else
int
up_generate_mp_reach(struct rde_peer *peer, struct update_attr *upa,
- struct rde_aspath *a, u_int8_t aid)
+ struct filterstate *state, u_int8_t aid)
{
u_int16_t tmp;
upa->mpattr[20] = 0; /* Reserved must be 0 */
/* nexthop dance see also up_get_nexthop() */
- if (a->flags & F_NEXTHOP_NOMODIFY) {
+ if (state->nhflags & F_NEXTHOP_NOMODIFY) {
/* no modify flag set */
- if (a->nexthop == NULL)
+ if (state->nexthop == NULL)
memcpy(&upa->mpattr[4], &peer->local_v6_addr.v6,
sizeof(struct in6_addr));
else
memcpy(&upa->mpattr[4],
- &a->nexthop->exit_nexthop.v6,
+ &state->nexthop->exit_nexthop.v6,
sizeof(struct in6_addr));
- } else if (a->flags & F_NEXTHOP_SELF)
+ } else if (state->nhflags & F_NEXTHOP_SELF)
memcpy(&upa->mpattr[4], &peer->local_v6_addr.v6,
sizeof(struct in6_addr));
else if (!peer->conf.ebgp) {
/* ibgp */
- if (a->nexthop == NULL ||
- (a->nexthop->exit_nexthop.aid == AID_INET6 &&
- !memcmp(&a->nexthop->exit_nexthop.v6,
+ if (state->nexthop == NULL ||
+ (state->nexthop->exit_nexthop.aid == AID_INET6 &&
+ !memcmp(&state->nexthop->exit_nexthop.v6,
&peer->remote_addr.v6, sizeof(struct in6_addr))))
memcpy(&upa->mpattr[4], &peer->local_v6_addr.v6,
sizeof(struct in6_addr));
else
memcpy(&upa->mpattr[4],
- &a->nexthop->exit_nexthop.v6,
+ &state->nexthop->exit_nexthop.v6,
sizeof(struct in6_addr));
} else if (peer->conf.distance == 1) {
/* ebgp directly connected */
- if (a->nexthop != NULL &&
- a->nexthop->flags & NEXTHOP_CONNECTED)
+ if (state->nexthop != NULL &&
+ state->nexthop->flags & NEXTHOP_CONNECTED)
if (prefix_compare(&peer->remote_addr,
- &a->nexthop->nexthop_net,
- a->nexthop->nexthop_netlen) == 0) {
+ &state->nexthop->nexthop_net,
+ state->nexthop->nexthop_netlen) == 0) {
/*
* nexthop and peer are in the same
* subnet
*/
memcpy(&upa->mpattr[4],
- &a->nexthop->exit_nexthop.v6,
+ &state->nexthop->exit_nexthop.v6,
sizeof(struct in6_addr));
return (0);
}
upa->mpattr[3] = sizeof(u_int64_t) + sizeof(struct in_addr);
/* nexthop dance see also up_get_nexthop() */
- if (a->flags & F_NEXTHOP_NOMODIFY) {
+ if (state->nhflags & F_NEXTHOP_NOMODIFY) {
/* no modify flag set */
- if (a->nexthop == NULL)
+ if (state->nexthop == NULL)
memcpy(&upa->mpattr[12],
&peer->local_v4_addr.v4,
sizeof(struct in_addr));
else
/* nexthops are stored as IPv4 addrs */
memcpy(&upa->mpattr[12],
- &a->nexthop->exit_nexthop.v4,
+ &state->nexthop->exit_nexthop.v4,
sizeof(struct in_addr));
- } else if (a->flags & F_NEXTHOP_SELF)
+ } else if (state->nhflags & F_NEXTHOP_SELF)
memcpy(&upa->mpattr[12], &peer->local_v4_addr.v4,
sizeof(struct in_addr));
else if (!peer->conf.ebgp) {
/* ibgp */
- if (a->nexthop == NULL ||
- (a->nexthop->exit_nexthop.aid == AID_INET &&
- !memcmp(&a->nexthop->exit_nexthop.v4,
+ if (state->nexthop == NULL ||
+ (state->nexthop->exit_nexthop.aid == AID_INET &&
+ !memcmp(&state->nexthop->exit_nexthop.v4,
&peer->remote_addr.v4, sizeof(struct in_addr))))
memcpy(&upa->mpattr[12],
&peer->local_v4_addr.v4,
sizeof(struct in_addr));
else
memcpy(&upa->mpattr[12],
- &a->nexthop->exit_nexthop.v4,
+ &state->nexthop->exit_nexthop.v4,
sizeof(struct in_addr));
} else if (peer->conf.distance == 1) {
/* ebgp directly connected */
- if (a->nexthop != NULL &&
- a->nexthop->flags & NEXTHOP_CONNECTED)
+ if (state->nexthop != NULL &&
+ state->nexthop->flags & NEXTHOP_CONNECTED)
if (prefix_compare(&peer->remote_addr,
- &a->nexthop->nexthop_net,
- a->nexthop->nexthop_netlen) == 0) {
+ &state->nexthop->nexthop_net,
+ state->nexthop->nexthop_netlen) == 0) {
/*
* nexthop and peer are in the same
* subnet
*/
memcpy(&upa->mpattr[12],
- &a->nexthop->exit_nexthop.v4,
+ &state->nexthop->exit_nexthop.v4,
sizeof(struct in_addr));
return (0);
}
int
up_generate_attr(struct rde_peer *peer, struct update_attr *upa,
- struct rde_aspath *a, u_int8_t aid)
+ struct filterstate *state, u_int8_t aid)
{
+ struct rde_aspath *asp = &state->aspath;
struct attr *oa, *newaggr = NULL;
u_char *pdata;
u_int32_t tmp32;
/* origin */
if ((r = attr_write(up_attr_buf + wlen, len, ATTR_WELL_KNOWN,
- ATTR_ORIGIN, &a->origin, 1)) == -1)
+ ATTR_ORIGIN, &asp->origin, 1)) == -1)
return (-1);
wlen += r; len -= r;
/* aspath */
if (!peer->conf.ebgp ||
peer->conf.flags & PEERFLAG_TRANS_AS)
- pdata = aspath_prepend(a->aspath, peer->conf.local_as, 0,
+ pdata = aspath_prepend(asp->aspath, peer->conf.local_as, 0,
&plen);
else
- pdata = aspath_prepend(a->aspath, peer->conf.local_as, 1,
+ pdata = aspath_prepend(asp->aspath, peer->conf.local_as, 1,
&plen);
if (!rde_as4byte(peer))
switch (aid) {
case AID_INET:
- nexthop = up_get_nexthop(peer, a);
+ nexthop = up_get_nexthop(peer, state);
if ((r = attr_write(up_attr_buf + wlen, len, ATTR_WELL_KNOWN,
ATTR_NEXTHOP, &nexthop, 4)) == -1)
return (-1);
* unless the MED is originating from us or the peer is an IBGP one.
* Only exception are routers with "transparent-as yes" set.
*/
- if (a->flags & F_ATTR_MED && (!peer->conf.ebgp ||
- a->flags & F_ATTR_MED_ANNOUNCE ||
+ if (asp->flags & F_ATTR_MED && (!peer->conf.ebgp ||
+ asp->flags & F_ATTR_MED_ANNOUNCE ||
peer->conf.flags & PEERFLAG_TRANS_AS)) {
- tmp32 = htonl(a->med);
+ tmp32 = htonl(asp->med);
if ((r = attr_write(up_attr_buf + wlen, len, ATTR_OPTIONAL,
ATTR_MED, &tmp32, 4)) == -1)
return (-1);
if (!peer->conf.ebgp) {
/* local preference, only valid for ibgp */
- tmp32 = htonl(a->lpref);
+ tmp32 = htonl(asp->lpref);
if ((r = attr_write(up_attr_buf + wlen, len, ATTR_WELL_KNOWN,
ATTR_LOCALPREF, &tmp32, 4)) == -1)
return (-1);
* 3. transitive known attrs: announce unmodified
* 4. transitive unknown attrs: set partial bit and re-announce
*/
- for (l = 0; l < a->others_len; l++) {
- if ((oa = a->others[l]) == NULL)
+ for (l = 0; l < asp->others_len; l++) {
+ if ((oa = asp->others[l]) == NULL)
break;
switch (oa->type) {
case ATTR_ATOMIC_AGGREGATE:
if (neednewpath) {
if (!peer->conf.ebgp ||
peer->conf.flags & PEERFLAG_TRANS_AS)
- pdata = aspath_prepend(a->aspath, peer->conf.local_as,
+ pdata = aspath_prepend(asp->aspath, peer->conf.local_as,
0, &plen);
else
- pdata = aspath_prepend(a->aspath, peer->conf.local_as,
+ pdata = aspath_prepend(asp->aspath, peer->conf.local_as,
1, &plen);
flags = ATTR_OPTIONAL|ATTR_TRANSITIVE;
- if (!(a->flags & F_PREFIX_ANNOUNCED))
+ if (!(asp->flags & F_PREFIX_ANNOUNCED))
flags |= ATTR_PARTIAL;
if (plen == 0)
r = 0;
}
if (newaggr) {
flags = ATTR_OPTIONAL|ATTR_TRANSITIVE;
- if (!(a->flags & F_PREFIX_ANNOUNCED))
+ if (!(asp->flags & F_PREFIX_ANNOUNCED))
flags |= ATTR_PARTIAL;
if ((r = attr_write(up_attr_buf + wlen, len, flags,
ATTR_AS4_AGGREGATOR, newaggr->data, newaggr->len)) == -1)
/* write mp attribute to different buffer */
if (ismp)
- if (up_generate_mp_reach(peer, upa, a, aid) == -1)
+ if (up_generate_mp_reach(peer, upa, state, aid) == -1)
return (-1);
/* the bgp path attributes are now stored in the global buf */