-/* $OpenBSD: bgpd.h,v 1.458 2023/01/17 16:09:01 claudio Exp $ */
+/* $OpenBSD: bgpd.h,v 1.459 2023/01/24 11:28:41 claudio Exp $ */
/*
* Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
uint32_t flags;
uint8_t prefixlen;
uint8_t origin;
- uint8_t validation_state;
+ uint8_t roa_validation_state;
+ uint8_t aspa_validation_state;
int8_t dmetric;
/* plus an aspath */
};
-/* $OpenBSD: rde.c,v 1.589 2023/01/18 17:40:17 claudio Exp $ */
+/* $OpenBSD: rde.c,v 1.590 2023/01/24 11:28:41 claudio Exp $ */
/*
* Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
int rde_get_mp_nexthop(u_char *, uint16_t, uint8_t,
struct filterstate *);
void rde_as4byte_fixup(struct rde_peer *, struct rde_aspath *);
-uint8_t rde_aspa_validation(struct rde_peer *, struct rde_aspath *,
+uint8_t rde_aspa_validity(struct rde_peer *, struct rde_aspath *,
uint8_t);
void rde_reflector(struct rde_peer *, struct rde_aspath *);
static void rde_softreconfig_sync_reeval(struct rib_entry *, void *);
static void rde_softreconfig_sync_fib(struct rib_entry *, void *);
static void rde_softreconfig_sync_done(void *, uint8_t);
-static void rde_roa_reload(void);
-static void rde_aspa_reload(void);
+static void rde_rpki_reload(void);
+static int rde_roa_reload(void);
+static int rde_aspa_reload(void);
int rde_update_queue_pending(void);
void rde_update_queue_runner(void);
void rde_update6_queue_runner(uint8_t);
static struct bgpd_config *conf, *nconf;
static struct rde_prefixset rde_roa, roa_new;
static struct rde_aspa *rde_aspa, *aspa_new;
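+/*
+ * Generation counter bumped on every ASPA table change; used to
+ * detect and refresh stale cached aspa_state results.
+ */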
+static uint8_t rde_aspa_generation;
volatile sig_atomic_t rde_quit = 0;
struct filter_head *out_rules, *out_rules_tmp;
break;
case IMSG_RECONF_DONE:
/* end of update */
- rde_roa_reload();
- rde_aspa_reload();
+ if (rde_roa_reload() + rde_aspa_reload() != 0)
+ rde_rpki_reload();
break;
}
imsg_free(&imsg);
state.aspath.flags |= F_ATTR_LOOP;
rde_reflector(peer, &state.aspath);
+
+ /* Cache the ASPA lookup result for all updates from ebgp sessions. */
+ if (state.aspath.flags & F_ATTR_ASPATH && peer->conf.ebgp) {
+ aspa_validation(rde_aspa, state.aspath.aspath,
+ &state.aspath.aspa_state);
+ state.aspath.aspa_generation = rde_aspa_generation;
+ }
}
p = imsg->data;
NULL, 0);
goto done;
}
-#if NOTYET
- state.aspath.aspa_state = rde_aspa_validation(peer,
- &state.aspath, AID_INET);
-#endif
}
while (nlri_len > 0) {
if (peer_has_add_path(peer, AID_INET, CAPA_AP_RECV)) {
mpp += pos;
mplen -= pos;
-#if NOTYET
- state.aspath.aspa_state = rde_aspa_validation(peer,
- &state.aspath, aid);
-#endif
while (mplen > 0) {
if (peer_has_add_path(peer, aid, CAPA_AP_RECV)) {
if (mplen <= sizeof(pathid)) {
{
struct filterstate state;
enum filter_actions action;
- uint16_t i;
uint32_t path_id_tx;
+ uint16_t i;
+ uint8_t roa_state, aspa_state;
const char *wmsg = "filtered, withdraw";
peer->prefix_rcvd_update++;
- in->vstate = rde_roa_validity(&rde_roa, prefix, prefixlen,
+
+ roa_state = rde_roa_validity(&rde_roa, prefix, prefixlen,
aspath_origin(in->aspath.aspath));
+ aspa_state = rde_aspa_validity(peer, &in->aspath, prefix->aid);
+ rde_filterstate_set_vstate(in, roa_state, aspa_state);
path_id_tx = pathid_assign(peer, path_id, prefix, prefixlen);
-
/* add original path to the Adj-RIB-In */
if (prefix_update(rib_byid(RIB_ADJ_IN), peer, path_id, path_id_tx,
in, prefix, prefixlen) == 1)
NULL, prefix, prefixlen);
}
- /* clear state */
rde_filterstate_clean(&state);
}
return (0);
uint8_t
-rde_aspa_validation(struct rde_peer *peer, struct rde_aspath *asp, uint8_t aid)
+rde_aspa_validity(struct rde_peer *peer, struct rde_aspath *asp, uint8_t aid)
{
if (!peer->conf.ebgp) /* ASPA validation is only performed on ebgp sessions */
return ASPA_NEVER_KNOWN;
*/
/* skip neighbor-as check for transparent RS sessions and sessions with enforced neighbor-as */
- if (peer->conf.role != ROLE_RS_CLIENT) {
+ if (peer->role != ROLE_RS_CLIENT &&
+ peer->conf.enforce_as != ENFORCE_AS_ON) {
uint32_t fas;
fas = aspath_neighbor(asp->aspath);
}
#endif
- return aspa_validation(rde_aspa, peer->conf.role, asp->aspath, aid);
+ /* if no role is set, the outcome is unknown */
+ if (peer->role == ROLE_NONE)
+ return ASPA_UNKNOWN;
+
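+ /*
+ * Only customer sessions need the combined up/down ramp result;
+ * all other roles are checked against the up-ramp only.
+ */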
+ switch (aid) {
+ case AID_INET:
+ if (peer->role != ROLE_CUSTOMER)
+ return asp->aspa_state.onlyup_v4;
+ else
+ return asp->aspa_state.downup_v4;
+ case AID_INET6:
+ if (peer->role != ROLE_CUSTOMER)
+ return asp->aspa_state.onlyup_v6;
+ else
+ return asp->aspa_state.downup_v6;
+ default:
+ return ASPA_NEVER_KNOWN; /* not reachable */
+ }
}
/*
pt_getaddr(p->pt, &rib.prefix);
rib.prefixlen = p->pt->prefixlen;
rib.origin = asp->origin;
- rib.validation_state = prefix_roa_vstate(p);
+ rib.roa_validation_state = prefix_roa_vstate(p);
+ rib.aspa_validation_state = prefix_aspa_vstate(p);
rib.dmetric = p->dmetric;
rib.flags = 0;
if (!adjout) {
/* add-path send needs rde_eval_all */
rde_eval_all = 1;
}
+ if (peer->role != peer->conf.role) {
+ if (reload == 0)
+ log_debug("peer role change: "
+ "reloading Adj-RIB-In");
+ peer->role = peer->conf.role;
+ reload++;
+ }
peer->export_type = peer->conf.export_type;
peer->flags = peer->conf.flags;
if (peer->flags & PEERFLAG_EVALUATE_ALL)
enum filter_actions action;
struct bgpd_addr prefix;
uint16_t i;
+ uint8_t aspa_vstate;
pt = re->prefix;
pt_getaddr(pt, &prefix);
asp = prefix_aspath(p);
peer = prefix_peer(p);
+ /* possible role change, update ASPA validation state */
+ if (prefix_aspa_vstate(p) == ASPA_NEVER_KNOWN)
+ aspa_vstate = ASPA_NEVER_KNOWN;
+ else
+ aspa_vstate = rde_aspa_validity(peer, asp, pt->aid);
+ prefix_set_vstate(p, prefix_roa_vstate(p), aspa_vstate);
+
/* skip announced networks, they are never filtered */
if (asp->flags & F_PREFIX_ANNOUNCED)
continue;
* so this runs outside of the softreconfig handlers.
*/
static void
-rde_roa_softreload(struct rib_entry *re, void *bula)
+rde_rpki_softreload(struct rib_entry *re, void *bula)
{
struct filterstate state;
struct rib *rib;
struct rde_aspath *asp;
enum filter_actions action;
struct bgpd_addr prefix;
- uint8_t vstate;
+ uint8_t roa_vstate, aspa_vstate;
uint16_t i;
pt = re->prefix;
peer = prefix_peer(p);
/* ROA validation state update */
- vstate = rde_roa_validity(&rde_roa,
+ roa_vstate = rde_roa_validity(&rde_roa,
&prefix, pt->prefixlen, aspath_origin(asp->aspath));
- if (vstate == prefix_roa_vstate(p))
+
+ /* ASPA validation state update (if needed) */
+ if (prefix_aspa_vstate(p) == ASPA_NEVER_KNOWN) {
+ aspa_vstate = ASPA_NEVER_KNOWN;
+ } else {
+ if (asp->aspa_generation != rde_aspa_generation) {
+ asp->aspa_generation = rde_aspa_generation;
+ aspa_validation(rde_aspa, asp->aspath,
+ &asp->aspa_state);
+ }
+ aspa_vstate = rde_aspa_validity(peer, asp, pt->aid);
+ }
+
+ if (roa_vstate == prefix_roa_vstate(p) &&
+ aspa_vstate == prefix_aspa_vstate(p))
continue;
- p->validation_state = vstate;
+ prefix_set_vstate(p, roa_vstate, aspa_vstate);
/* skip announced networks, they are never filtered */
if (asp->flags & F_PREFIX_ANNOUNCED)
continue;
}
}
-static int roa_update_pending;
-static int aspa_update_pending;
+static int rpki_update_pending;
static void
-rde_roa_softreload_done(void *arg, uint8_t aid)
+rde_rpki_softreload_done(void *arg, uint8_t aid)
{
/* the rpki update is done */
- log_info("ROA softreload done");
- roa_update_pending = 0;
+ log_info("RPKI softreload done");
+ rpki_update_pending = 0;
}
static void
+rde_rpki_reload(void)
+{
+ if (rpki_update_pending) {
+ log_info("RPKI softreload skipped, old still running");
+ return;
+ }
+
+ rpki_update_pending = 1;
+ if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC, RDE_RUNNER_ROUNDS,
+ rib_byid(RIB_ADJ_IN), rde_rpki_softreload,
+ rde_rpki_softreload_done, NULL) == -1)
+ fatal("%s: rib_dump_new", __func__);
+}
+
+static int
rde_roa_reload(void)
{
struct rde_prefixset roa_old;
- if (roa_update_pending) {
- log_info("ROA softreload skipped, old still running");
- return;
+ if (rpki_update_pending) {
+ trie_free(&roa_new.th); /* can't use new roa table */
+ return 1; /* force call to rde_rpki_reload */
}
roa_old = rde_roa;
if (trie_equal(&rde_roa.th, &roa_old.th)) {
rde_roa.lastchange = roa_old.lastchange;
trie_free(&roa_old.th); /* old roa no longer needed */
- return;
+ return 0;
}
rde_roa.lastchange = getmonotime();
- trie_free(&roa_old.th); /* old roa no longer needed */
+ trie_free(&roa_old.th); /* old roa no longer needed */
log_debug("ROA change: reloading Adj-RIB-In");
- roa_update_pending = 1;
- if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC, RDE_RUNNER_ROUNDS,
- rib_byid(RIB_ADJ_IN), rde_roa_softreload,
- rde_roa_softreload_done, NULL) == -1)
- fatal("%s: rib_dump_new", __func__);
+ return 1;
}
-static void
+static int
rde_aspa_reload(void)
{
struct rde_aspa *aspa_old;
- if (aspa_update_pending) {
- log_info("ASPA softreload skipped, old still running");
- return;
+ if (rpki_update_pending) {
+ aspa_table_free(aspa_new); /* can't use new aspa table */
+ aspa_new = NULL;
+ return 1; /* rde_rpki_reload warns */
}
aspa_old = rde_aspa;
if (aspa_table_equal(rde_aspa, aspa_old)) {
aspa_table_unchanged(rde_aspa, aspa_old);
aspa_table_free(aspa_old); /* old aspa no longer needed */
- return;
+ return 0;
}
- aspa_table_free(aspa_old); /* old aspa no longer needed */
+ aspa_table_free(aspa_old); /* old aspa no longer needed */
log_debug("ASPA change: reloading Adj-RIB-In");
- /* XXX MISSING */
+ rde_aspa_generation++;
+ return 1;
}
/*
struct filter_set_head *vpnset = NULL;
struct in_addr prefix4;
struct in6_addr prefix6;
- uint16_t i;
uint32_t path_id_tx;
+ uint16_t i;
+ uint8_t vstate;
if (nc->rd != 0) {
SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) {
rde_apply_set(vpnset, peerself, peerself, state,
nc->prefix.aid);
- path_id_tx = pathid_assign(peerself, 0, &nc->prefix, nc->prefixlen);
-
-#if NOTYET
- state->aspath.aspa_state = ASPA_NEVER_KNOWN;
-#endif
- state->vstate = rde_roa_validity(&rde_roa, &nc->prefix,
- nc->prefixlen, aspath_origin(state->aspath.aspath));
+ vstate = rde_roa_validity(&rde_roa, &nc->prefix, nc->prefixlen,
+ aspath_origin(state->aspath.aspath));
+ rde_filterstate_set_vstate(state, vstate, ASPA_NEVER_KNOWN);
+ path_id_tx = pathid_assign(peerself, 0, &nc->prefix, nc->prefixlen);
if (prefix_update(rib_byid(RIB_ADJ_IN), peerself, 0, path_id_tx,
state, &nc->prefix, nc->prefixlen) == 1)
peerself->prefix_cnt++;
-/* $OpenBSD: rde.h,v 1.280 2023/01/18 17:40:17 claudio Exp $ */
+/* $OpenBSD: rde.h,v 1.281 2023/01/24 11:28:41 claudio Exp $ */
/*
* Copyright (c) 2003, 2004 Claudio Jeker <claudio@openbsd.org> and
uint32_t path_id_tx;
enum peer_state state;
enum export_type export_type;
+ enum role role;
uint16_t loc_rib_id;
uint16_t short_as;
uint16_t mrt_idx;
};
struct rde_aspa;
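+/*
+ * Cached ASPA validation result of an aspath: the outcome of the
+ * up-ramp only check and of the combined up/down ramp check, for
+ * both IPv4 and IPv6.
+ */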
+struct rde_aspa_state {
+ uint8_t onlyup_v4;
+ uint8_t downup_v4;
+ uint8_t onlyup_v6;
+ uint8_t downup_v6;
+};
#define AS_SET 1
#define AS_SEQUENCE 2
RB_ENTRY(rde_aspath) entry;
struct attr **others;
struct aspath *aspath;
+ struct rde_aspa_state aspa_state;
int refcnt;
uint32_t flags; /* internally used */
uint32_t med; /* multi exit disc */
uint16_t pftableid; /* pf table id */
uint8_t origin;
uint8_t others_len;
- uint8_t aspa_state;
+ uint8_t aspa_generation;
};
enum nexthop_state {
void rde_filterstate_init(struct filterstate *);
void rde_filterstate_prep(struct filterstate *, struct prefix *);
void rde_filterstate_copy(struct filterstate *, struct filterstate *);
+void rde_filterstate_set_vstate(struct filterstate *, uint8_t, uint8_t);
void rde_filterstate_clean(struct filterstate *);
int rde_filter_equal(struct filter_head *, struct filter_head *,
struct rde_peer *);
return (p->validation_state & ROA_MASK);
}
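+
+/*
+ * The validation_state byte of a prefix packs both results: the ROA
+ * state in the low nibble (ROA_MASK) and the ASPA state in the high
+ * nibble.
+ */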
+static inline uint8_t
+prefix_aspa_vstate(struct prefix *p)
+{
+ return (p->validation_state >> 4);
+}
+
+static inline void
+prefix_set_vstate(struct prefix *p, uint8_t roa_vstate, uint8_t aspa_vstate)
+{
+ p->validation_state = roa_vstate & ROA_MASK;
+ p->validation_state |= aspa_vstate << 4;
+}
+
static inline struct rib_entry *
prefix_re(struct prefix *p)
{
int up_dump_mp_reach(u_char *, int, struct rde_peer *, uint8_t);
/* rde_aspa.c */
-uint8_t aspa_validation(struct rde_aspa *, enum role, struct aspath *,
- uint8_t);
+void aspa_validation(struct rde_aspa *, struct aspath *,
+ struct rde_aspa_state *);
struct rde_aspa *aspa_table_prep(uint32_t, size_t);
void aspa_add_set(struct rde_aspa *, uint32_t, const uint32_t *,
uint32_t, const uint32_t *);
const struct rde_aspa *);
void aspa_table_unchanged(struct rde_aspa *,
const struct rde_aspa *);
+void aspa_table_set_generation(struct rde_aspa *, uint8_t);
#endif /* __RDE_H__ */
-/* $OpenBSD: rde_aspa.c,v 1.2 2023/01/17 16:09:01 claudio Exp $ */
+/* $OpenBSD: rde_aspa.c,v 1.3 2023/01/24 11:28:41 claudio Exp $ */
/*
* Copyright (c) 2022 Claudio Jeker <claudio@openbsd.org>
#include "bgpd.h"
#include "rde.h"
-enum cp_res {
- UNKNOWN = -1,
- NOT_PROVIDER = 0,
- PROVIDER = 1,
-};
+#define UNKNOWN 0x0
+#define NOT_PROVIDER 0x1
+#define PROVIDER 0x2
+
+#define CP(x, y) (x | (y << 4))
+#define CP_GET(x, i) ((x >> (i * 4)) & 0xf)
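+
+/*
+ * Example: CP(PROVIDER, NOT_PROVIDER) packs to 0x12; CP_GET(0x12, 0)
+ * yields PROVIDER for IPv4 and CP_GET(0x12, 1) yields NOT_PROVIDER
+ * for IPv6.
+ */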
struct rde_aspa_set {
uint32_t as;
uint32_t maxset;
struct rde_aspa_set *sets;
uint32_t *data;
- size_t maxdata;
- size_t curdata;
+ size_t maxdata;
+ size_t curdata;
uint32_t curset;
time_t lastchange;
};
* Returns UNKNOWN if cas is not in the ra table.
* Returns PROVIDER for an aid if pas is registered for cas for that aid.
* Returns NOT_PROVIDER otherwise.
+ * The returned value encodes the result for both IPv4 and IPv6 and
+ * needs to be extracted with CP_GET.
* This function is called very frequently and needs to be fast.
*/
-static enum cp_res
-aspa_cp_lookup(struct rde_aspa *ra, uint32_t cas, uint32_t pas, uint8_t aid)
+static uint8_t
+aspa_cp_lookup(struct rde_aspa *ra, uint32_t cas, uint32_t pas)
{
struct rde_aspa_set *aspa;
- uint32_t i, mask;
-
- switch (aid) {
- case AID_INET:
- mask = 0x1;
- break;
- case AID_INET6:
- mask = 0x2;
- break;
- default:
- return UNKNOWN;
- }
+ uint32_t i;
aspa = aspa_lookup(ra, cas);
if (aspa == NULL)
- return UNKNOWN;
+ return CP(UNKNOWN, UNKNOWN);
if (aspa->num < 16) {
for (i = 0; i < aspa->num; i++) {
if (aspa->pas[i] == pas)
break;
if (aspa->pas[i] > pas)
- return NOT_PROVIDER;
+ return CP(NOT_PROVIDER, NOT_PROVIDER);
}
if (i == aspa->num)
- return NOT_PROVIDER;
+ return CP(NOT_PROVIDER, NOT_PROVIDER);
} else {
uint32_t lim, x;
for (i = 0, lim = aspa->num; lim != 0; lim /= 2) {
x = lim / 2;
- i += x;
+ i += x;
if (aspa->pas[i] == pas) {
break;
} else if (aspa->pas[i] < pas) {
}
}
if (lim == 0)
- return NOT_PROVIDER;
+ return CP(NOT_PROVIDER, NOT_PROVIDER);
}
if (aspa->pas_aid == NULL)
- return PROVIDER;
- if (aspa->pas_aid[i / 16] & (mask << ((i % 16) * 2)))
- return PROVIDER;
- return NOT_PROVIDER;
+ return CP(PROVIDER, PROVIDER);
+ switch (aspa->pas_aid[i / 16] >> ((i % 16) * 2) & 0x3) {
+ case 0x1:
+ return CP(PROVIDER, NOT_PROVIDER);
+ case 0x2:
+ return CP(NOT_PROVIDER, PROVIDER);
+ case 0x3:
+ return CP(PROVIDER, PROVIDER);
+ default:
+ fatalx("impossible state in aspa_cp_lookup");
+ }
}
/*
* Returns 0 on success and -1 if a segment other than an AS_SEQUENCE
* (e.g. an AS_SET) is encountered.
*/
static int
-aspa_check_aspath(struct rde_aspa *ra, struct aspath *a, int check_downramp,
- uint8_t aid, struct aspa_state *s)
+aspa_check_aspath(struct rde_aspa *ra, struct aspath *a, struct aspa_state *s)
{
uint8_t *seg;
+ int afi;
uint32_t as, prevas = 0;
uint16_t len, seg_size;
- uint8_t i, seg_type, seg_len;
- enum cp_res r;
+ uint8_t i, r, seg_type, seg_len;
- memset(s, 0, sizeof(*s));
- /* the neighbor-as itself is by definition valid */
- s->ndown_p = 1;
+ /* the neighbor-as itself is by definition valid */
+ s[0].ndown_p = 1;
+ s[1].ndown_p = 1;
/*
* Walk aspath and validate if necessary both up- and down-ramp.
- * If an AS_SET is found the result is immediatly ASPA_INVALID.
+ * If an AS_SET is found return -1 to indicate failure.
*/
seg = aspath_dump(a);
len = aspath_length(a);
seg_len = seg[1];
seg_size = 2 + sizeof(uint32_t) * seg_len;
- if (seg_type == AS_SET)
+ if (seg_type != AS_SEQUENCE)
return -1;
for (i = 0; i < seg_len; i++) {
if (as == prevas)
continue; /* skip prepends */
- s->nhops++;
- if (prevas != 0) {
- if (check_downramp) {
- /*
- * down-ramp check, remember the
- * left-most unknown or not-provider
- * node and the right-most provider node
- * for which all nodes before are valid.
- */
- r = aspa_cp_lookup(ra, prevas, as, aid);
- switch (r) {
- case UNKNOWN:
- if (s->ndown_u == 0)
- s->ndown_u = s->nhops;
- break;
- case PROVIDER:
- if (s->ndown_p + 1 == s->nhops)
- s->ndown_p = s->nhops;
- break;
- case NOT_PROVIDER:
- if (s->ndown_np == 0)
- s->ndown_np = s->nhops;
- break;
- }
+ s[0].nhops++;
+ s[1].nhops++;
+ if (prevas == 0) {
+ prevas = as; /* skip left-most AS */
+ continue;
+ }
+
+ /*
+ * down-ramp check, remember the
+ * left-most unknown or not-provider
+ * node and the right-most provider node
+ * for which all nodes before are valid.
+ */
+ r = aspa_cp_lookup(ra, prevas, as);
+ for (afi = 0; afi < 2; afi++) {
+ switch (CP_GET(r, afi)) {
+ case UNKNOWN:
+ if (s[afi].ndown_u == 0)
+ s[afi].ndown_u = s[afi].nhops;
+ break;
+ case PROVIDER:
+ if (s[afi].ndown_p + 1 == s[afi].nhops)
+ s[afi].ndown_p = s[afi].nhops;
+ break;
+ case NOT_PROVIDER:
+ if (s[afi].ndown_np == 0)
+ s[afi].ndown_np = s[afi].nhops;
+ break;
}
- /*
- * up-ramp check, remember the right-most
- * unknown and not-provider node and the
- * left-most provider node for which all nodes
- * after are valid.
- * We recorde the nhops value of prevas,
- * that's why the use of nhops - 1.
- */
- r = aspa_cp_lookup(ra, as, prevas, aid);
- switch (r) {
+ }
+
+ /*
+ * up-ramp check, remember the right-most
+ * unknown and not-provider node and the
+ * left-most provider node for which all nodes
+ * after are valid.
+ * We record the nhops value of prevas,
+ * which is why nhops - 1 is used.
+ */
+ r = aspa_cp_lookup(ra, as, prevas);
+ for (afi = 0; afi < 2; afi++) {
+ switch (CP_GET(r, afi)) {
case UNKNOWN:
- s->nup_p = 0;
- s->nup_u = s->nhops - 1;
+ s[afi].nup_p = 0;
+ s[afi].nup_u = s[afi].nhops - 1;
break;
case PROVIDER:
- if (s->nup_p == 0)
- s->nup_p = s->nhops - 1;
+ if (s[afi].nup_p == 0)
+ s[afi].nup_p = s[afi].nhops - 1;
break;
case NOT_PROVIDER:
- s->nup_p = 0;
- s->nup_np = s->nhops - 1;
+ s[afi].nup_p = 0;
+ s[afi].nup_np = s[afi].nhops - 1;
break;
}
}
}
}
- /* the source-as itself is by definition valid */
- if (s->nup_p == 0)
- s->nup_p = s->nhops;
+ /* the source-as itself is by definition valid */
+ if (s[0].nup_p == 0)
+ s[0].nup_p = s[0].nhops;
+ if (s[1].nup_p == 0)
+ s[1].nup_p = s[1].nhops;
return 0;
}
+/*
+ * Set the two possible ASPA outcomes, for an up-ramp only path and
+ * for a combined up/down ramp path, in *onlyup and *downup.
+ */
+static void
+aspa_check_finalize(struct aspa_state *state, uint8_t *onlyup, uint8_t *downup)
+{
+ /*
+ * Just an up-ramp:
+ * if a check returned NOT_PROVIDER then the result is invalid.
+ * if a check returned UNKNOWN then the result is unknown.
+ * else path is valid.
+ */
+ if (state->nup_np != 0)
+ *onlyup = ASPA_INVALID;
+ else if (state->nup_u != 0)
+ *onlyup = ASPA_UNKNOWN;
+ else
+ *onlyup = ASPA_VALID;
+
+ /*
+ * Both up-ramp and down-ramp:
+ * if nhops <= 2 the result is valid.
+ * if there is less than one AS hop between up-ramp and
+ * down-ramp then the result is valid.
+ * if not-provider nodes for both ramps exist and they
+ * do not overlap the path is invalid.
+ * else the path is unknown.
+ */
+ if (state->nhops <= 2)
+ *downup = ASPA_VALID;
+ else if (state->nup_p - state->ndown_p <= 1)
+ *downup = ASPA_VALID;
+ else if (state->nup_np != 0 && state->ndown_np != 0 &&
+ state->nup_np - state->ndown_np >= 0)
+ *downup = ASPA_INVALID;
+ else
+ *downup = ASPA_UNKNOWN;
+}
+
/*
* Validate an aspath against the aspa_set *ra.
* Fills in *vstate with the four outcomes (up-ramp only and up/down
* ramp, for both IPv4 and IPv6): ASPA_VALID if the aspath is valid,
* ASPA_UNKNOWN if it contains hops with unknown relation and
* ASPA_INVALID for empty aspaths, aspaths containing an AS_SET and
* aspaths that fail validation.
*/
-uint8_t
-aspa_validation(struct rde_aspa *ra, enum role role, struct aspath *a,
- uint8_t aid)
+void
+aspa_validation(struct rde_aspa *ra, struct aspath *a,
+ struct rde_aspa_state *vstate)
{
- struct aspa_state state;
+ struct aspa_state state[2] = { 0 };
/* no aspa table, everything is unknown */
- if (ra == NULL)
- return ASPA_UNKNOWN;
+ if (ra == NULL) {
+ memset(vstate, ASPA_UNKNOWN, 4);
+ return;
+ }
/* empty ASPATHs are always invalid */
- if (aspath_length(a) == 0)
- return ASPA_INVALID;
-
- /* if no role is set, the outcome is unknown */
- if (role == ROLE_NONE)
- return ASPA_UNKNOWN;
-
- if (aspa_check_aspath(ra, a, role == ROLE_CUSTOMER, aid, &state) == -1)
- return ASPA_INVALID;
-
- if (role != ROLE_CUSTOMER) {
- /*
- * Just an up-ramp:
- * if a check returned NOT_PROVIDER then the result is invalid.
- * if a check returned UNKNOWN then the result is unknown.
- * else path is valid.
- */
- if (state.nup_np != 0)
- return ASPA_INVALID;
- if (state.nup_u != 0)
- return ASPA_UNKNOWN;
- return ASPA_VALID;
- } else {
- /*
- * Both up-ramp and down-ramp:
- * if nhops <= 2 the result is valid.
- * if there is less than one AS hop between up-ramp and
- * down-ramp then the result is valid.
- * if not-provider nodes for both ramps exist and they
- * do not overlap the path is invalid.
- * else the path is unknown.
- */
- if (state.nhops <= 2)
- return ASPA_VALID;
- if (state.nup_p - state.ndown_p <= 1)
- return ASPA_VALID;
- if (state.nup_np != 0 && state.ndown_np != 0 &&
- state.nup_np - state.ndown_np >= 0)
- return ASPA_INVALID;
- return ASPA_UNKNOWN;
+ if (aspath_length(a) == 0) {
+ memset(vstate, ASPA_INVALID, 4);
+ return;
}
+
+ if (aspa_check_aspath(ra, a, state) == -1) {
+ memset(vstate, ASPA_INVALID, 4);
+ return;
+ }
+
+ aspa_check_finalize(state, &vstate->onlyup_v4, &vstate->downup_v4);
+ aspa_check_finalize(state + 1, &vstate->onlyup_v6, &vstate->downup_v6);
}
/*
if ((ra->data = malloc(datasize)) == NULL)
fatal("aspa table prep");
-
+
ra->mask = hsize - 1;
ra->maxset = entries;
ra->maxdata = datasize / sizeof(ra->data[0]);
-/* $OpenBSD: rde_filter.c,v 1.131 2023/01/12 17:35:51 claudio Exp $ */
+/* $OpenBSD: rde_filter.c,v 1.132 2023/01/24 11:28:41 claudio Exp $ */
/*
* Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org>
return (0);
if (f->match.ovs.is_set) {
- if (state->vstate != f->match.ovs.validity)
+ if ((state->vstate & ROA_MASK) != f->match.ovs.validity)
return (0);
}
state->vstate = vstate;
}
+/*
+ * Build a filterstate based on the prefix p.
+ */
void
rde_filterstate_prep(struct filterstate *state, struct prefix *p)
{
rde_filterstate_set(state, prefix_aspath(p), prefix_communities(p),
- prefix_nexthop(p), prefix_nhflags(p), prefix_roa_vstate(p));
+ prefix_nexthop(p), prefix_nhflags(p), p->validation_state);
}
+/*
+ * Copy a filterstate to a new filterstate.
+ */
void
rde_filterstate_copy(struct filterstate *state, struct filterstate *src)
{
src->nexthop, src->nhflags, src->vstate);
}
+/*
+ * Set the vstate based on the aspa_state and the supplied roa vstate.
+ * This function must be called after rde_filterstate_init().
+ * rde_filterstate_prep() and rde_filterstate_copy() set the right vstate.
+ */
+void
+rde_filterstate_set_vstate(struct filterstate *state, uint8_t roa_vstate,
+ uint8_t aspa_state)
+{
+ state->vstate = aspa_state << 4;
+ state->vstate |= roa_vstate & ROA_MASK;
+}
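+
+/*
+ * Usage sketch: when an update is parsed in rde.c, roa_state is taken
+ * from rde_roa_validity() and aspa_state from rde_aspa_validity() and
+ * both are stored with rde_filterstate_set_vstate().
+ */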
+
void
rde_filterstate_clean(struct filterstate *state)
{
-/* $OpenBSD: rde_peer.c,v 1.26 2023/01/18 13:20:01 claudio Exp $ */
+/* $OpenBSD: rde_peer.c,v 1.27 2023/01/24 11:28:41 claudio Exp $ */
/*
* Copyright (c) 2019 Claudio Jeker <claudio@openbsd.org>
fatalx("King Bula's new peer met an unknown RIB");
peer->state = PEER_NONE;
peer->eval = peer->conf.eval;
+ peer->role = peer->conf.role;
peer->export_type = peer->conf.export_type;
peer->flags = peer->conf.flags;
SIMPLEQ_INIT(&peer->imsg_queue);
-/* $OpenBSD: rde_rib.c,v 1.253 2023/01/20 10:28:22 claudio Exp $ */
+/* $OpenBSD: rde_rib.c,v 1.254 2023/01/24 11:28:41 claudio Exp $ */
/*
* Copyright (c) 2003, 2004 Claudio Jeker <claudio@openbsd.org>
if (a->pftableid < b->pftableid)
return (-1);
+ /* no need to check aspa_state or aspa_generation */
+
r = aspath_compare(a->aspath, b->aspath);
if (r > 0)
return (1);
-/* $OpenBSD: rde_update.c,v 1.152 2023/01/18 17:40:17 claudio Exp $ */
+/* $OpenBSD: rde_update.c,v 1.153 2023/01/24 11:28:41 claudio Exp $ */
/*
* Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org>
asp = &state.aspath;
asp->aspath = aspath_get(NULL, 0);
asp->origin = ORIGIN_IGP;
-#ifdef NOTYET
- asp->aspa_state = ASPA_NEVER_KNOWN;
-#endif
+ rde_filterstate_set_vstate(&state, ROA_NOTFOUND, ASPA_NEVER_KNOWN);
/* the other default values are OK, nexthop is once again NULL */
/*