From d7e935310df3b6df3c5d4f74d811a0443a53e589 Mon Sep 17 00:00:00 2001
From: claudio
Date: Tue, 17 Jan 2023 16:09:01 +0000
Subject: [PATCH] Add the needed logic to load the ASPA table from the rtr
 process into the RDE.

The actual reload logic is missing to keep the diff small.

OK tb@
---
 usr.sbin/bgpd/bgpd.h     |   9 +++-
 usr.sbin/bgpd/rde.c      | 104 +++++++++++++++++++++++++++++++++++++--
 usr.sbin/bgpd/rde.h      |  14 ++++--
 usr.sbin/bgpd/rde_aspa.c |  63 ++++++++++++++++++++----
 usr.sbin/bgpd/rtr.c      |  95 ++++++++++++++++++++++++++++-------
 5 files changed, 250 insertions(+), 35 deletions(-)

diff --git a/usr.sbin/bgpd/bgpd.h b/usr.sbin/bgpd/bgpd.h
index b3e66469430..bb2bd25f96c 100644
--- a/usr.sbin/bgpd/bgpd.h
+++ b/usr.sbin/bgpd/bgpd.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bgpd.h,v 1.457 2023/01/11 13:53:17 claudio Exp $ */
+/* $OpenBSD: bgpd.h,v 1.458 2023/01/17 16:09:01 claudio Exp $ */
 
 /*
  * Copyright (c) 2003, 2004 Henning Brauer
@@ -604,6 +604,7 @@ enum imsg_type {
 	IMSG_RECONF_ASPA_TAS,
 	IMSG_RECONF_ASPA_TAS_AID,
 	IMSG_RECONF_ASPA_DONE,
+	IMSG_RECONF_ASPA_PREP,
 	IMSG_RECONF_RTR_CONFIG,
 	IMSG_RECONF_DRAIN,
 	IMSG_RECONF_DONE,
@@ -801,6 +802,7 @@ struct ctl_show_set {
 		PREFIX_SET,
 		ORIGIN_SET,
 		ROA_SET,
+		ASPA_SET,
 	} type;
 };
 
@@ -1180,6 +1182,11 @@ struct aspa_set {
 	RB_ENTRY(aspa_set)	entry;
 };
 
+struct aspa_prep {
+	size_t		datasize;
+	uint32_t	entries;
+};
+
 struct l3vpn {
 	SIMPLEQ_ENTRY(l3vpn)	entry;
 	char			descr[PEER_DESCR_LEN];
diff --git a/usr.sbin/bgpd/rde.c b/usr.sbin/bgpd/rde.c
index 4cf55b2bb71..2f0fa6c99ed 100644
--- a/usr.sbin/bgpd/rde.c
+++ b/usr.sbin/bgpd/rde.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde.c,v 1.586 2023/01/16 10:37:08 claudio Exp $ */
+/* $OpenBSD: rde.c,v 1.587 2023/01/17 16:09:01 claudio Exp $ */
 
 /*
  * Copyright (c) 2003, 2004 Henning Brauer
@@ -83,6 +83,7 @@ static void	 rde_softreconfig_sync_reeval(struct rib_entry *, void *);
 static void	 rde_softreconfig_sync_fib(struct rib_entry *, void *);
 static void	 rde_softreconfig_sync_done(void *, uint8_t);
 static void	 rde_roa_reload(void);
+static void	 rde_aspa_reload(void);
 int		 rde_update_queue_pending(void);
 void		 rde_update_queue_runner(void);
 void		 rde_update6_queue_runner(uint8_t);
@@ -109,7 +110,7 @@ static struct imsgbuf		*ibuf_rtr;
 static struct imsgbuf		*ibuf_main;
 static struct bgpd_config	*conf, *nconf;
 static struct rde_prefixset	 rde_roa, roa_new;
-static struct rde_aspa		*rde_aspa /* , *aspa_new */;
+static struct rde_aspa		*rde_aspa, *aspa_new;
 
 volatile sig_atomic_t	 rde_quit = 0;
 struct filter_head	*out_rules, *out_rules_tmp;
@@ -641,6 +642,14 @@ badnetdel:
 			imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
 			    imsg.hdr.pid, -1, &cset, sizeof(cset));
 
+			/* then aspa set */
+			memset(&cset, 0, sizeof(cset));
+			cset.type = ASPA_SET;
+			strlcpy(cset.name, "RPKI ASPA", sizeof(cset.name));
+			aspa_table_stats(rde_aspa, &cset);
+			imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
+			    imsg.hdr.pid, -1, &cset, sizeof(cset));
+
 			SIMPLEQ_FOREACH(aset, &conf->as_sets, entry) {
 				memset(&cset, 0, sizeof(cset));
 				cset.type = ASNUM_SET;
@@ -1065,9 +1074,11 @@ rde_dispatch_imsg_parent(struct imsgbuf *ibuf)
 void
 rde_dispatch_imsg_rtr(struct imsgbuf *ibuf)
 {
-	struct imsg	 imsg;
-	struct roa	 roa;
-	int		 n;
+	static struct aspa_set	*aspa;
+	struct imsg		 imsg;
+	struct roa		 roa;
+	struct aspa_prep	 ap;
+	int			 n;
 
 	while (ibuf) {
 		if ((n = imsg_get(ibuf, &imsg)) == -1)
@@ -1094,9 +1105,65 @@ rde_dispatch_imsg_rtr(struct imsgbuf *ibuf)
 				    log_addr(&p), roa.prefixlen);
 			}
 			break;
+		case IMSG_RECONF_ASPA_PREP:
+			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(ap))
+				fatalx("IMSG_RECONF_ASPA_PREP bad len");
+			if (aspa_new)
+				fatalx("unexpected IMSG_RECONF_ASPA_PREP");
+			memcpy(&ap, imsg.data, sizeof(ap));
+			aspa_new = aspa_table_prep(ap.entries, ap.datasize);
+			break;
+		case IMSG_RECONF_ASPA:
+			if (aspa_new == NULL)
+				fatalx("unexpected IMSG_RECONF_ASPA");
+			if (aspa != NULL)
+				fatalx("IMSG_RECONF_ASPA already sent");
+			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
+			    sizeof(uint32_t) * 2)
+				fatalx("IMSG_RECONF_ASPA bad len");
+
+			if ((aspa = calloc(1, sizeof(*aspa))) == NULL)
+				fatal("IMSG_RECONF_ASPA");
+			memcpy(&aspa->as, imsg.data, sizeof(aspa->as));
+			memcpy(&aspa->num, (char *)imsg.data + sizeof(aspa->as),
+			    sizeof(aspa->num));
+			break;
+		case IMSG_RECONF_ASPA_TAS:
+			if (aspa == NULL)
+				fatalx("unexpected IMSG_RECONF_ASPA_TAS");
+			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
+			    aspa->num * sizeof(uint32_t))
+				fatalx("IMSG_RECONF_ASPA_TAS bad len");
+			aspa->tas = reallocarray(NULL, aspa->num,
+			    sizeof(uint32_t));
+			if (aspa->tas == NULL)
+				fatal("IMSG_RECONF_ASPA_TAS");
+			memcpy(aspa->tas, imsg.data,
+			    aspa->num * sizeof(uint32_t));
+			break;
+		case IMSG_RECONF_ASPA_TAS_AID:
+			if (aspa == NULL)
+				fatalx("unexpected IMSG_RECONF_ASPA_TAS_AID");
+			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
+			    (aspa->num + 15) / 16)
+				fatalx("IMSG_RECONF_ASPA_TAS_AID bad len");
+			aspa->tas_aid = malloc((aspa->num + 15) / 16);
+			if (aspa->tas_aid == NULL)
+				fatal("IMSG_RECONF_ASPA_TAS_AID");
+			memcpy(aspa->tas_aid, imsg.data, (aspa->num + 15) / 16);
+			break;
+		case IMSG_RECONF_ASPA_DONE:
+			if (aspa_new == NULL)
+				fatalx("unexpected IMSG_RECONF_ASPA");
+			aspa_add_set(aspa_new, aspa->as, aspa->tas,
+			    aspa->num, (void *)aspa->tas_aid);
+			free_aspa(aspa);
+			aspa = NULL;
+			break;
 		case IMSG_RECONF_DONE:
 			/* end of update */
 			rde_roa_reload();
+			rde_aspa_reload();
 			break;
 		}
 		imsg_free(&imsg);
@@ -3933,6 +4000,7 @@ rde_roa_softreload(struct rib_entry *re, void *bula)
 }
 
 static int roa_update_pending;
+static int aspa_update_pending;
 
 static void
 rde_roa_softreload_done(void *arg, uint8_t aid)
@@ -3974,6 +4042,32 @@ rde_roa_reload(void)
 		fatal("%s: rib_dump_new", __func__);
 }
 
+static void
+rde_aspa_reload(void)
+{
+	struct rde_aspa *aspa_old;
+
+	if (aspa_update_pending) {
+		log_info("ASPA softreload skipped, old still running");
+		return;
+	}
+
+	aspa_old = rde_aspa;
+	rde_aspa = aspa_new;
+	aspa_new = NULL;
+
+	/* check if aspa changed */
+	if (aspa_table_equal(rde_aspa, aspa_old)) {
+		aspa_table_unchanged(rde_aspa, aspa_old);
+		aspa_table_free(aspa_old);	/* old aspa no longer needed */
+		return;
+	}
+
+	aspa_table_free(aspa_old);	/* old aspa no longer needed */
+	log_debug("ASPA change: reloading Adj-RIB-In");
+	/* XXX MISSING */
+}
+
 /*
  * generic helper function
  */
diff --git a/usr.sbin/bgpd/rde.h b/usr.sbin/bgpd/rde.h
index 85f7235dcd7..c6a192d68fe 100644
--- a/usr.sbin/bgpd/rde.h
+++ b/usr.sbin/bgpd/rde.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde.h,v 1.278 2023/01/12 17:35:51 claudio Exp $ */
+/* $OpenBSD: rde.h,v 1.279 2023/01/17 16:09:01 claudio Exp $ */
 
 /*
  * Copyright (c) 2003, 2004 Claudio Jeker and
@@ -113,6 +113,8 @@ struct rde_peer {
 	uint8_t			 flags;
 };
 
+struct rde_aspa;
+
 #define AS_SET			1
 #define AS_SEQUENCE		2
 #define AS_CONFED_SEQUENCE	3
@@ -731,11 +733,17 @@ int	 up_dump_attrnlri(u_char *, int, struct rde_peer *);
 int	 up_dump_mp_reach(u_char *, int, struct rde_peer *, uint8_t);
 
 /* rde_aspa.c */
+uint8_t		 aspa_validation(struct rde_aspa *, enum role, struct aspath *,
+		    uint8_t);
 struct rde_aspa	*aspa_table_prep(uint32_t, size_t);
 void		 aspa_add_set(struct rde_aspa *, uint32_t, const uint32_t *,
 		    uint32_t, const uint32_t *);
 void		 aspa_table_free(struct rde_aspa *);
-uint8_t		 aspa_validation(struct rde_aspa *, enum role, struct aspath *,
-		    uint8_t);
+void		 aspa_table_stats(const struct rde_aspa *,
+		    struct ctl_show_set *);
+int		 aspa_table_equal(const struct rde_aspa *,
+		    const struct rde_aspa *);
+void		 aspa_table_unchanged(struct rde_aspa *,
+		    const struct rde_aspa *);
 
 #endif /* __RDE_H__ */
diff --git a/usr.sbin/bgpd/rde_aspa.c b/usr.sbin/bgpd/rde_aspa.c
index 3bc5960890b..51c0135e9a2 100644
--- a/usr.sbin/bgpd/rde_aspa.c
+++ b/usr.sbin/bgpd/rde_aspa.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde_aspa.c,v 1.1 2023/01/11 13:53:17 claudio Exp $ */
+/* $OpenBSD: rde_aspa.c,v 1.2 2023/01/17 16:09:01 claudio Exp $ */
 
 /*
  * Copyright (c) 2022 Claudio Jeker
@@ -52,6 +52,7 @@ struct rde_aspa {
 	size_t			 maxdata;
 	size_t			 curdata;
 	uint32_t		 curset;
+	time_t			 lastchange;
 };
 
 struct aspa_state {
@@ -114,14 +115,14 @@ static enum cp_res
 aspa_cp_lookup(struct rde_aspa *ra, uint32_t cas, uint32_t pas, uint8_t aid)
 {
 	struct rde_aspa_set	*aspa;
-	uint32_t		 i;
+	uint32_t		 i, mask;
 
 	switch (aid) {
 	case AID_INET:
-		aid = 0x1;
+		mask = 0x1;
 		break;
 	case AID_INET6:
-		aid = 0x2;
+		mask = 0x2;
 		break;
 	default:
 		return UNKNOWN;
@@ -162,7 +163,7 @@ aspa_cp_lookup(struct rde_aspa *ra, uint32_t cas, uint32_t pas, uint8_t aid)
 
 	if (aspa->pas_aid == NULL)
 		return PROVIDER;
-	if (aspa->pas_aid[i / 16] & (aid << ((i % 16) * 2)))
+	if (aspa->pas_aid[i / 16] & (mask << ((i % 16) * 2)))
 		return PROVIDER;
 	return NOT_PROVIDER;
 }
@@ -336,10 +337,10 @@ aspa_validation(struct rde_aspa *ra, enum role role, struct aspath *a,
 /*
  * Preallocate all data structures needed for the aspa table.
  * There are entries number of rde_aspa_sets with data_size bytes of
- * extra data.
+ * extra data (used to store SPAS and optional AFI bitmasks).
  */
 struct rde_aspa *
-aspa_table_prep(uint32_t entries, size_t data_size)
+aspa_table_prep(uint32_t entries, size_t datasize)
 {
 	struct rde_aspa	*ra;
 	uint32_t	 hsize = 1024;
@@ -361,12 +362,13 @@ aspa_table_prep(uint32_t entries, size_t data_size)
 	if ((ra->sets = calloc(entries, sizeof(ra->sets[0]))) == NULL)
 		fatal("aspa table prep");
-	if ((ra->data = malloc(data_size)) == NULL)
+	if ((ra->data = malloc(datasize)) == NULL)
 		fatal("aspa table prep");
 
 	ra->mask = hsize - 1;
 	ra->maxset = entries;
-	ra->maxdata = data_size / sizeof(ra->data[0]);
+	ra->maxdata = datasize / sizeof(ra->data[0]);
+	ra->lastchange = getmonotime();
 
 	return ra;
 }
@@ -442,3 +444,46 @@ aspa_table_free(struct rde_aspa *ra)
 	free(ra->data);
 	free(ra);
 }
+
+void
+aspa_table_stats(const struct rde_aspa *ra, struct ctl_show_set *cset)
+{
+	if (ra == NULL)
+		return;
+	cset->lastchange = ra->lastchange;
+	cset->as_cnt = ra->maxset;
+}
+
+/*
+ * Return true if the two rde_aspa tables contain the same data.
+ */
+int
+aspa_table_equal(const struct rde_aspa *ra, const struct rde_aspa *rb)
+{
+	uint32_t i;
+
+	/* allow NULL pointers to be passed */
+	if (ra == NULL && rb == NULL)
+		return 1;
+	if (ra == NULL || rb == NULL)
+		return 0;
+
+	if (ra->maxset != rb->maxset ||
+	    ra->maxdata != rb->maxdata)
+		return 0;
+	for (i = 0; i < ra->maxset; i++)
+		if (ra->sets[i].as != rb->sets[i].as)
+			return 0;
+	if (memcmp(ra->data, rb->data, ra->maxdata * sizeof(ra->data[0])) != 0)
+		return 0;
+
+	return 1;
+}
+
+void
+aspa_table_unchanged(struct rde_aspa *ra, const struct rde_aspa *old)
+{
+	if (ra == NULL || old == NULL)
+		return;
+	ra->lastchange = old->lastchange;
+}
diff --git a/usr.sbin/bgpd/rtr.c b/usr.sbin/bgpd/rtr.c
index cd3756f88f4..e1bc0911eae 100644
--- a/usr.sbin/bgpd/rtr.c
+++ b/usr.sbin/bgpd/rtr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rtr.c,v 1.9 2022/11/18 10:17:23 claudio Exp $ */
+/* $OpenBSD: rtr.c,v 1.10 2023/01/17 16:09:01 claudio Exp $ */
 
 /*
  * Copyright (c) 2020 Claudio Jeker
@@ -428,25 +428,15 @@ aspa_set_entry(struct aspa_set *aspa, uint32_t asnum, uint8_t aid)
 	uint32_t	 i, num, *newtas;
 	uint8_t		*newtasaid;
 
-	switch (aid) {
-	case AID_INET:
-		aid = 0x1;
-		break;
-	case AID_INET6:
-		aid = 0x2;
-		break;
-	case AID_UNSPEC:
-		aid = 0x3;
-		break;
-	default:
-		fatalx("aspa_set bad AID");
-	}
+	if (aid != AID_UNSPEC && aid != AID_INET && aid != AID_INET6)
+		fatalx("aspa set with invalid AFI %s", aid2str(aid));
 
 	for (i = 0; i < aspa->num; i++) {
 		if (asnum < aspa->tas[i] || aspa->tas[i] == 0)
 			break;
 		if (asnum == aspa->tas[i]) {
-			aspa->tas_aid[i] |= aid;
+			if (aspa->tas_aid[i] != aid)
+				aspa->tas_aid[i] = AID_UNSPEC;
 			return;
 		}
 	}
@@ -488,6 +478,53 @@ rtr_aspa_merge_set(struct aspa_tree *a, struct aspa_set *mergeset)
 		aspa_set_entry(aspa, mergeset->tas[i], mergeset->tas_aid[i]);
 }
 
+/*
+ * Compress aspa_set tas_aid into the bitfield used by the RDE.
+ * Returns the size of tas and tas_aid bitfield required for this aspa_set.
+ * At the same time tas_aid is overwritten with the bitmasks or cleared
+ * if no extra aid masks are needed.
+ */
+static size_t
+rtr_aspa_set_prep(struct aspa_set *aspa)
+{
+	uint32_t i, mask = 0;
+	int needafi = 0;
+	size_t s;
+
+	s = aspa->num * sizeof(uint32_t);
+	for (i = 0; i < aspa->num; i++) {
+		switch (aspa->tas_aid[i]) {
+		case AID_INET:
+			needafi = 1;
+			mask |= 0x1 << ((i % 16) * 2);
+			break;
+		case AID_INET6:
+			needafi = 1;
+			mask |= 0x2 << ((i % 16) * 2);
+			break;
+		default:
+			mask |= 0x3 << ((i % 16) * 2);
+			break;
+		}
+		if (i % 16 == 15) {
+			memcpy(aspa->tas_aid + (i / 16) * sizeof(mask), &mask,
+			    sizeof(mask));
+			mask = 0;
+		}
+	}
+
+	if (!needafi) {
+		free(aspa->tas_aid);
+		aspa->tas_aid = NULL;
+	} else {
+		memcpy(aspa->tas_aid + (aspa->num / 16) * sizeof(mask), &mask,
+		    sizeof(mask));
+		s += (aspa->num + 15) / 16;
+	}
+
+	return s;
+}
+
 /*
  * Merge all RPKI ROA trees into one as one big union.
  * Simply try to add all roa entries into a new RB tree.
@@ -500,6 +537,7 @@ rtr_recalc(void)
 	struct aspa_tree at;
 	struct roa *roa, *nr;
 	struct aspa_set *aspa;
+	struct aspa_prep ap = { 0 };
 
 	RB_INIT(&rt);
 	RB_INIT(&at);
@@ -510,15 +548,38 @@ rtr_recalc(void)
 
 	imsg_compose(ibuf_rde, IMSG_RECONF_ROA_SET, 0, 0, -1, NULL, 0);
 	RB_FOREACH_SAFE(roa, roa_tree, &rt, nr) {
-		RB_REMOVE(roa_tree, &rt, roa);
 		imsg_compose(ibuf_rde, IMSG_RECONF_ROA_ITEM, 0, 0, -1,
 		    roa, sizeof(*roa));
-		free(roa);
 	}
+	free_roatree(&rt);
 
 	RB_FOREACH(aspa, aspa_tree, &conf->aspa)
 		rtr_aspa_merge_set(&at, aspa);
 
+	RB_FOREACH(aspa, aspa_tree, &at) {
+		ap.datasize += rtr_aspa_set_prep(aspa);
+		ap.entries++;
+	}
+
+	imsg_compose(ibuf_rde, IMSG_RECONF_ASPA_PREP, 0, 0, -1,
+	    &ap, sizeof(ap));
+
+	RB_FOREACH(aspa, aspa_tree, &at) {
+		uint32_t	as[2];
+		as[0] = aspa->as;
+		as[1] = aspa->num;
+
+		imsg_compose(ibuf_rde, IMSG_RECONF_ASPA, 0, 0, -1,
+		    &as, sizeof(as));
+		imsg_compose(ibuf_rde, IMSG_RECONF_ASPA_TAS, 0, 0, -1,
+		    aspa->tas, aspa->num * sizeof(*aspa->tas));
+		if (aspa->tas_aid)
+			imsg_compose(ibuf_rde, IMSG_RECONF_ASPA_TAS_AID, 0, 0,
+			    -1, aspa->tas_aid, (aspa->num + 15) / 16);
+		imsg_compose(ibuf_rde, IMSG_RECONF_ASPA_DONE, 0, 0, -1,
+		    NULL, 0);
+	}
+
 	free_aspatree(&at);
 
 	imsg_compose(ibuf_rde, IMSG_RECONF_DONE, 0, 0, -1, NULL, 0);
-- 
2.20.1
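
For readers following the diff: the AFI bitfield that rtr_aspa_set_prep() builds on the rtr side and
aspa_cp_lookup() reads in the RDE packs two bits per TAS entry, 16 entries per 32-bit word, with 0x1
marking a provider valid for IPv4 only, 0x2 for IPv6 only and 0x3 for both, addressed as word i / 16
with shift (i % 16) * 2. The standalone sketch below shows only that packing; it is not part of the
patch, and the helper names and sample values are made up for illustration.

	/*
	 * Standalone sketch of the two-bits-per-entry AFI packing described
	 * above.  Not part of bgpd; helper names are invented for illustration.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define AFI_MASK_V4	0x1	/* provider only valid for IPv4 */
	#define AFI_MASK_V6	0x2	/* provider only valid for IPv6 */
	#define AFI_MASK_BOTH	0x3	/* provider valid for both AFIs */

	/* store the two-bit mask for TAS entry i, 16 entries per uint32_t */
	static void
	afi_mask_set(uint32_t *words, uint32_t i, uint32_t mask)
	{
		words[i / 16] |= (mask & 0x3) << ((i % 16) * 2);
	}

	/* fetch the two-bit mask for TAS entry i */
	static uint32_t
	afi_mask_get(const uint32_t *words, uint32_t i)
	{
		return (words[i / 16] >> ((i % 16) * 2)) & 0x3;
	}

	int
	main(void)
	{
		uint32_t num = 20;	/* 20 providers -> two 32-bit words */
		uint32_t *words;

		if ((words = calloc((num + 15) / 16, sizeof(*words))) == NULL)
			return 1;

		afi_mask_set(words, 0, AFI_MASK_V4);	/* 1st provider: IPv4 only */
		afi_mask_set(words, 5, AFI_MASK_BOTH);	/* 6th provider: both */
		afi_mask_set(words, 17, AFI_MASK_V6);	/* 18th provider: IPv6 only */

		printf("entry 0: %x, entry 5: %x, entry 17: %x\n",
		    (unsigned)afi_mask_get(words, 0), (unsigned)afi_mask_get(words, 5),
		    (unsigned)afi_mask_get(words, 17));

		free(words);
		return 0;
	}

Keeping the AFI information in a side array of two-bit fields means the common case, where every
provider covers both address families, needs no per-entry AFI data at all, which is why
rtr_aspa_set_prep() frees tas_aid entirely when needafi stays zero.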