From: claudio
Date: Fri, 23 Sep 2022 15:49:20 +0000 (+0000)
Subject: Implement a special update generator for add-path send all.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=910ddab463d090b3816d386d2ad106b2163ee620;p=openbsd

Implement a special update generator for add-path send all.

The generic add-path code up_generate_addpath() reevaluates everything
since this is the simplest way to select the announced paths. For
add-path all this is overkill since there is no dependency between
prefixes and so individual prefixes can be handled more efficiently.

Extend rde_generate_updates() to pass the current newbest and oldbest
prefixes (for the selected best path) but now also include newpath and
oldpath (the prefix that is added/removed/modified). If newpath or
oldpath is set then a single prefix was altered and
up_generate_addpath_all() can just remove or add this prefix. If newpath
and oldpath are NULL then the full list based on newbest needs to be
inserted and any old path/prefix removed in the process.

This improves update generation performance on big route collectors
using add-path all substantially.
OK tb@
---

diff --git a/usr.sbin/bgpd/rde.c b/usr.sbin/bgpd/rde.c
index 2941db9f941..95070004bb1 100644
--- a/usr.sbin/bgpd/rde.c
+++ b/usr.sbin/bgpd/rde.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde.c,v 1.577 2022/09/21 10:39:17 claudio Exp $ */
+/* $OpenBSD: rde.c,v 1.578 2022/09/23 15:49:20 claudio Exp $ */
 
 /*
  * Copyright (c) 2003, 2004 Henning Brauer
@@ -3758,7 +3758,7 @@ rde_softreconfig_out(struct rib_entry *re, void *arg)
         /* no valid path for prefix */
         return;
 
-    rde_generate_updates(rib, p, NULL, EVAL_RECONF);
+    rde_generate_updates(rib, p, NULL, NULL, NULL, EVAL_RECONF);
 }
 
 static void
diff --git a/usr.sbin/bgpd/rde.h b/usr.sbin/bgpd/rde.h
index 231dac6c242..57424e5deef 100644
--- a/usr.sbin/bgpd/rde.h
+++ b/usr.sbin/bgpd/rde.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde.h,v 1.272 2022/09/21 10:39:17 claudio Exp $ */
+/* $OpenBSD: rde.h,v 1.273 2022/09/23 15:49:20 claudio Exp $ */
 
 /*
  * Copyright (c) 2003, 2004 Claudio Jeker and
@@ -390,7 +390,8 @@ void    rde_pftable_del(uint16_t, struct prefix *);
 
 int             rde_evaluate_all(void);
 void            rde_generate_updates(struct rib *, struct prefix *,
-                    struct prefix *, enum eval_mode);
+                    struct prefix *, struct prefix *, struct prefix *,
+                    enum eval_mode);
 uint32_t        rde_local_as(void);
 int             rde_decisionflags(void);
 void            rde_peer_send_rrefresh(struct rde_peer *, uint8_t, uint8_t);
@@ -700,6 +701,9 @@ void    up_generate_updates(struct filter_head *, struct rde_peer *,
             struct prefix *, struct prefix *);
 void    up_generate_addpath(struct filter_head *, struct rde_peer *,
             struct prefix *, struct prefix *);
+void    up_generate_addpath_all(struct filter_head *,
+            struct rde_peer *, struct prefix *, struct prefix *,
+            struct prefix *);
 void    up_generate_default(struct filter_head *, struct rde_peer *,
             uint8_t);
 int     up_is_eor(struct rde_peer *, uint8_t);
diff --git a/usr.sbin/bgpd/rde_decide.c b/usr.sbin/bgpd/rde_decide.c
index 062fa78b55c..b9d7860627b 100644
--- a/usr.sbin/bgpd/rde_decide.c
+++ b/usr.sbin/bgpd/rde_decide.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde_decide.c,v 1.97 2022/07/25 16:37:55 claudio Exp $ */
+/* $OpenBSD: rde_decide.c,v 1.98 2022/09/23 15:49:20 claudio Exp $ */
 
 /*
  * Copyright (c) 2003, 2004 Claudio Jeker
@@ -556,7 +556,8 @@ prefix_evaluate(struct rib_entry *re, struct prefix *new, struct prefix *old)
      * but remember that newbest may be NULL aka ineligible.
      * Additional decision may be made by the called functions.
      */
-    rde_generate_updates(rib, newbest, oldbest, EVAL_DEFAULT);
+    rde_generate_updates(rib, newbest, oldbest, new, old,
+        EVAL_DEFAULT);
     if ((rib->flags & F_RIB_NOFIB) == 0)
         rde_send_kroute(rib, newbest, oldbest);
     return;
@@ -569,7 +570,8 @@ prefix_evaluate(struct rib_entry *re, struct prefix *new, struct prefix *old)
      */
     if (rde_evaluate_all())
         if ((new != NULL && prefix_eligible(new)) || old != NULL)
-            rde_generate_updates(rib, newbest, NULL, EVAL_ALL);
+            rde_generate_updates(rib, newbest, NULL, new, old,
+                EVAL_ALL);
 }
 
 void
@@ -628,7 +630,7 @@ prefix_evaluate_nexthop(struct prefix *p, enum nexthop_state state,
      * but remember that newbest may be NULL aka ineligible.
      * Additional decision may be made by the called functions.
      */
-    rde_generate_updates(rib, newbest, oldbest, EVAL_DEFAULT);
+    rde_generate_updates(rib, newbest, oldbest, p, p, EVAL_DEFAULT);
     if ((rib->flags & F_RIB_NOFIB) == 0)
         rde_send_kroute(rib, newbest, oldbest);
     return;
@@ -640,5 +642,5 @@ prefix_evaluate_nexthop(struct prefix *p, enum nexthop_state state,
      * rde_generate_updates() will then take care of distribution.
      */
     if (rde_evaluate_all())
-        rde_generate_updates(rib, newbest, NULL, EVAL_ALL);
+        rde_generate_updates(rib, newbest, NULL, p, p, EVAL_ALL);
 }
diff --git a/usr.sbin/bgpd/rde_peer.c b/usr.sbin/bgpd/rde_peer.c
index 03659415c3d..310a466f0fc 100644
--- a/usr.sbin/bgpd/rde_peer.c
+++ b/usr.sbin/bgpd/rde_peer.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde_peer.c,v 1.24 2022/09/21 10:39:17 claudio Exp $ */
+/* $OpenBSD: rde_peer.c,v 1.25 2022/09/23 15:49:20 claudio Exp $ */
 
 /*
  * Copyright (c) 2019 Claudio Jeker
@@ -206,14 +206,20 @@ RB_GENERATE(peer_tree, rde_peer, entry, peer_cmp);
 
 static void
 peer_generate_update(struct rde_peer *peer, uint16_t rib_id,
-    struct prefix *new, struct prefix *old, enum eval_mode mode)
+    struct prefix *newbest, struct prefix *oldbest,
+    struct prefix *newpath, struct prefix *oldpath,
+    enum eval_mode mode)
 {
     uint8_t     aid;
 
-    if (new != NULL)
-        aid = new->pt->aid;
-    else if (old != NULL)
-        aid = old->pt->aid;
+    if (newbest != NULL)
+        aid = newbest->pt->aid;
+    else if (oldbest != NULL)
+        aid = oldbest->pt->aid;
+    else if (newpath != NULL)
+        aid = newpath->pt->aid;
+    else if (oldpath != NULL)
+        aid = oldpath->pt->aid;
     else
         return;
 
@@ -239,32 +245,38 @@ peer_generate_update(struct rde_peer *peer, uint16_t rib_id,
 
     /* handle peers with add-path */
     if (peer_has_add_path(peer, aid, CAPA_AP_SEND)) {
-        up_generate_addpath(out_rules, peer, new, old);
+        if (peer->eval.mode == ADDPATH_EVAL_ALL)
+            up_generate_addpath_all(out_rules, peer, newbest,
+                newpath, oldpath);
+        else
+            up_generate_addpath(out_rules, peer, newbest, oldbest);
         return;
     }
 
     /* skip regular peers if the best path didn't change */
     if (mode == EVAL_ALL && (peer->flags & PEERFLAG_EVALUATE_ALL) == 0)
         return;
-    up_generate_updates(out_rules, peer, new, old);
+    up_generate_updates(out_rules, peer, newbest, oldbest);
 }
 
 void
-rde_generate_updates(struct rib *rib, struct prefix *new, struct prefix *old,
+rde_generate_updates(struct rib *rib, struct prefix *newbest,
+    struct prefix *oldbest, struct prefix *newpath, struct prefix *oldpath,
     enum eval_mode mode)
 {
     struct rde_peer *peer;
 
     /*
-     * If old is != NULL we know it was active and should be removed.
-     * If new is != NULL we know it is reachable and then we should
+     * If oldbest is != NULL we know it was active and should be removed.
+     * If newbest is != NULL we know it is reachable and then we should
      * generate an update.
      */
-    if (old == NULL && new == NULL)
+    if (oldbest == NULL && newbest == NULL)
         return;
 
     RB_FOREACH(peer, peer_tree, &peertable)
-        peer_generate_update(peer, rib->id, new, old, mode);
+        peer_generate_update(peer, rib->id, newbest, oldbest, newpath,
+            oldpath, mode);
 }
 
 /*
@@ -372,10 +384,10 @@ rde_up_dump_upcall(struct rib_entry *re, void *ptr)
     struct rde_peer     *peer = ptr;
     struct prefix       *p;
 
-    /* no eligible prefix, not even for 'evaluate all' */
     if ((p = prefix_best(re)) == NULL)
+        /* no eligible prefix, not even for 'evaluate all' */
         return;
-    peer_generate_update(peer, re->rib_id, p, NULL, 0);
+    peer_generate_update(peer, re->rib_id, p, NULL, NULL, NULL, 0);
 }
 
 static void
diff --git a/usr.sbin/bgpd/rde_update.c b/usr.sbin/bgpd/rde_update.c
index 6fca7b2faf8..6e1558f73b4 100644
--- a/usr.sbin/bgpd/rde_update.c
+++ b/usr.sbin/bgpd/rde_update.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde_update.c,v 1.147 2022/09/01 13:19:11 claudio Exp $ */
+/* $OpenBSD: rde_update.c,v 1.148 2022/09/23 15:49:20 claudio Exp $ */
 
 /*
  * Copyright (c) 2004 Claudio Jeker
@@ -361,6 +361,113 @@ up_generate_addpath(struct filter_head *rules, struct rde_peer *peer,
     }
 }
 
+/*
+ * Generate updates for the add-path send all case. Since all prefixes
+ * are distributed just remove old and add new.
+ */
+void
+up_generate_addpath_all(struct filter_head *rules, struct rde_peer *peer,
+    struct prefix *best, struct prefix *new, struct prefix *old)
+{
+    struct filterstate  state;
+    struct bgpd_addr    addr;
+    struct prefix       *p, *next, *head = NULL;
+    uint8_t             prefixlen;
+    int                 all = 0;
+
+    /*
+     * if old and new are NULL then insert all prefixes from best,
+     * clearing old routes in the process
+     */
+    if (old == NULL && new == NULL) {
+        /* mark all paths as stale */
+        pt_getaddr(best->pt, &addr);
+        prefixlen = best->pt->prefixlen;
+
+        head = prefix_adjout_lookup(peer, &addr, prefixlen);
+        for (p = head; p != NULL; p = prefix_adjout_next(peer, p))
+            p->flags |= PREFIX_FLAG_STALE;
+
+        new = best;
+        all = 1;
+    }
+
+    if (old != NULL) {
+        /* withdraw stale paths */
+        pt_getaddr(old->pt, &addr);
+        p = prefix_adjout_get(peer, old->path_id_tx, &addr,
+            old->pt->prefixlen);
+        if (p != NULL)
+            prefix_adjout_withdraw(p);
+    }
+
+    if (new != NULL) {
+        pt_getaddr(new->pt, &addr);
+        prefixlen = new->pt->prefixlen;
+    }
+
+    /* add new path (or multiple if all is set) */
+    for (; new != NULL; new = next) {
+        if (all)
+            next = TAILQ_NEXT(new, entry.list.rib);
+        else
+            next = NULL;
+
+        /* only allow valid prefixes */
+        if (!prefix_eligible(new))
+            break;
+
+        /*
+         * up_test_update() needs to run before the output filters
+         * else the well known communities won't work properly.
+         * The output filters would not be able to add well known
+         * communities.
+         */
+        if (!up_test_update(peer, new))
+            continue;
+
+        rde_filterstate_prep(&state, prefix_aspath(new),
+            prefix_communities(new), prefix_nexthop(new),
+            prefix_nhflags(new));
+        if (rde_filter(rules, peer, prefix_peer(new), &addr,
+            prefixlen, prefix_vstate(new), &state) == ACTION_DENY) {
+            rde_filterstate_clean(&state);
+            continue;
+        }
+
+        if (up_enforce_open_policy(peer, &state)) {
+            rde_filterstate_clean(&state);
+            continue;
+        }
+
+        /* from here on we know this is an update */
+        p = prefix_adjout_get(peer, new->path_id_tx, &addr, prefixlen);
+
+        up_prep_adjout(peer, &state, addr.aid);
+        prefix_adjout_update(p, peer, &state, &addr,
+            prefixlen, new->path_id_tx, prefix_vstate(new));
+        rde_filterstate_clean(&state);
+
+        /* max prefix checker outbound */
+        if (peer->conf.max_out_prefix &&
+            peer->prefix_out_cnt > peer->conf.max_out_prefix) {
+            log_peer_warnx(&peer->conf,
+                "outbound prefix limit reached (>%u/%u)",
+                peer->prefix_out_cnt, peer->conf.max_out_prefix);
+            rde_update_err(peer, ERR_CEASE,
+                ERR_CEASE_MAX_SENT_PREFIX, NULL, 0);
+        }
+    }
+
+    if (all) {
+        /* withdraw stale paths */
+        for (p = head; p != NULL; p = prefix_adjout_next(peer, p)) {
+            if (p->flags & PREFIX_FLAG_STALE)
+                prefix_adjout_withdraw(p);
+        }
+    }
+}
+
 struct rib_entry *rib_add(struct rib *, struct bgpd_addr *, int);
 void rib_remove(struct rib_entry *);
 int rib_empty(struct rib_entry *);
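
The newpath == oldpath == NULL branch of up_generate_addpath_all() above
follows a mark-and-sweep pattern: every Adj-RIB-Out entry for the
destination is first flagged PREFIX_FLAG_STALE, the currently eligible
paths are then (re)announced via prefix_adjout_update(), and any entry
still flagged stale afterwards is withdrawn. The standalone sketch below
shows only that mark/re-add/sweep idea on a plain linked list; it is not
part of the patch or of bgpd, all names in it (toy_path,
toy_adjout_update, toy_regenerate) are made up, and it deliberately skips
everything the real code does per path (up_test_update(), the output
filters, up_enforce_open_policy(), the outbound prefix limit).

/*
 * Toy model of the "mark stale, re-add, withdraw leftovers" pattern.
 * Illustrative only; none of these types or helpers exist in bgpd.
 */
#include <stdio.h>
#include <stdlib.h>

#define FLAG_STALE      0x01

struct toy_path {
    struct toy_path *next;
    unsigned int     id;        /* stands in for path_id_tx */
    unsigned int     flags;
};

static struct toy_path *adj_out;        /* stands in for the Adj-RIB-Out */

/* announce a path: add it, or clear its stale flag if already present */
static void
toy_adjout_update(unsigned int id)
{
    struct toy_path *p;

    for (p = adj_out; p != NULL; p = p->next)
        if (p->id == id) {
            p->flags &= ~FLAG_STALE;
            return;
        }
    if ((p = calloc(1, sizeof(*p))) == NULL)
        exit(1);
    p->id = id;
    p->next = adj_out;
    adj_out = p;
}

/* full regeneration: mark all stale, re-announce, withdraw leftovers */
static void
toy_regenerate(const unsigned int *ids, size_t n)
{
    struct toy_path *p, **pp;
    size_t           i;

    for (p = adj_out; p != NULL; p = p->next)
        p->flags |= FLAG_STALE;         /* mark all paths as stale */

    for (i = 0; i < n; i++)
        toy_adjout_update(ids[i]);      /* re-announce current paths */

    for (pp = &adj_out; (p = *pp) != NULL; ) {
        if (p->flags & FLAG_STALE) {    /* still stale: withdraw it */
            *pp = p->next;
            printf("withdraw path %u\n", p->id);
            free(p);
        } else
            pp = &p->next;
    }
}

int
main(void)
{
    unsigned int     first[] = { 1, 2, 3 };
    unsigned int     second[] = { 2, 4 };       /* paths 1 and 3 are gone */
    struct toy_path *p;

    toy_regenerate(first, 3);
    toy_regenerate(second, 2);  /* withdraws 1 and 3, announces 4 */
    for (p = adj_out; p != NULL; p = p->next)
        printf("announced path %u\n", p->id);
    return 0;
}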