-/* $OpenBSD: ip_ipsp.h,v 1.230 2021/12/11 16:33:47 bluhm Exp $ */
+/* $OpenBSD: ip_ipsp.h,v 1.231 2021/12/14 17:50:37 bluhm Exp $ */
/*
* The authors of this code are John Ioannidis (ji@tla.org),
* Angelos D. Keromytis (kermit@csd.uch.gr),
TAILQ_ENTRY(ipsec_acquire) ipa_next;
};
+/*
+ * Locks used to protect struct members in this file:
+ * p ipo_tdb_mtx global mutex linking policy to TDB
+ */
struct ipsec_policy {
struct radix_node ipo_nodes[2]; /* radix tree glue */
struct sockaddr_encap ipo_addr;
* mode was used.
*/
- u_int64_t ipo_last_searched; /* Timestamp of last lookup */
+ u_int64_t ipo_last_searched; /* [p] Timestamp of lookup */
u_int8_t ipo_flags; /* See IPSP_POLICY_* definitions */
u_int8_t ipo_type; /* USE/ACQUIRE/... */
int ipo_ref_count;
- struct tdb *ipo_tdb; /* Cached entry */
+ struct tdb *ipo_tdb; /* [p] Cached TDB entry */
struct ipsec_ids *ipo_ids;
* Locks used to protect struct members in this file:
* I immutable after creation
* N net lock
- * s tdb_sadb_mtx
* m tdb_mtx
+ * p ipo_tdb_mtx global mutex linking policy to TDB
+ * s tdb_sadb_mtx SA database global mutex
*/
struct tdb { /* tunnel descriptor block */
/*
struct sockaddr_encap tdb_filter; /* What traffic is acceptable */
struct sockaddr_encap tdb_filtermask; /* And the mask */
- TAILQ_HEAD(tdb_policy_head, ipsec_policy) tdb_policy_head;
+ TAILQ_HEAD(tdb_policy_head, ipsec_policy) tdb_policy_head; /* [p] */
TAILQ_ENTRY(tdb) tdb_sync_entry;
};
#define tdb_ipackets tdb_data.tdd_ipackets
extern TAILQ_HEAD(ipsec_policy_head, ipsec_policy) ipsec_policy_head;
extern struct mutex tdb_sadb_mtx;
+extern struct mutex ipo_tdb_mtx;
struct cryptop;
-/* $OpenBSD: ip_spd.c,v 1.108 2021/12/03 17:18:34 bluhm Exp $ */
+/* $OpenBSD: ip_spd.c,v 1.109 2021/12/14 17:50:37 bluhm Exp $ */
/*
* The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
*
struct pool ipsec_policy_pool;
struct pool ipsec_acquire_pool;
+/*
+ * For tdb_walk() calling tdb_delete_locked() we need lock order
+ * tdb_sadb_mtx before ipo_tdb_mtx.
+ */
+struct mutex ipo_tdb_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);
+
/* Protected by the NET_LOCK(). */
struct radix_node_head **spd_tables;
unsigned int spd_table_max;
}
/* Do we have a cached entry ? If so, check if it's still valid. */
+ mtx_enter(&ipo_tdb_mtx);
if (ipo->ipo_tdb != NULL &&
(ipo->ipo_tdb->tdb_flags & TDBF_INVALID)) {
TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,
tdb_unref(ipo->ipo_tdb);
ipo->ipo_tdb = NULL;
}
+ mtx_leave(&ipo_tdb_mtx);
/* Outgoing packet policy check. */
if (direction == IPSP_DIRECTION_OUT) {
ids = ipsp_ids_lookup(ipsecflowinfo);
/* Check that the cached TDB (if present), is appropriate. */
+ mtx_enter(&ipo_tdb_mtx);
if (ipo->ipo_tdb != NULL) {
if ((ipo->ipo_last_searched <= ipsec_last_added) ||
(ipo->ipo_sproto != ipo->ipo_tdb->tdb_sproto) ||
goto nomatchout;
/* Cached entry is good. */
- return ipsp_spd_inp(m, inp, ipo, tdbout);
+ error = ipsp_spd_inp(m, inp, ipo, tdbout);
+ mtx_leave(&ipo_tdb_mtx);
+ return error;
nomatchout:
/* Cached TDB was not good. */
* explosion in the number of acquired SAs).
*/
if (ipo->ipo_last_searched <= ipsec_last_added) {
+ struct tdb *tdbp_new;
+
/* "Touch" the entry. */
if (dignore == 0)
ipo->ipo_last_searched = getuptime();
+ /* gettdb() takes tdb_sadb_mtx, preserve lock order */
+ mtx_leave(&ipo_tdb_mtx);
/* Find an appropriate SA from the existing ones. */
- ipo->ipo_tdb = gettdbbydst(rdomain,
+ tdbp_new = gettdbbydst(rdomain,
dignore ? &sdst : &ipo->ipo_dst,
ipo->ipo_sproto, ids ? ids: ipo->ipo_ids,
&ipo->ipo_addr, &ipo->ipo_mask);
+ mtx_enter(&ipo_tdb_mtx);
+ if (ipo->ipo_tdb != NULL) {
+ /* Remove TDB cached by a parallel thread. */
+ TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head,
+ ipo, ipo_tdb_next);
+ tdb_unref(ipo->ipo_tdb);
+ }
+ ipo->ipo_tdb = tdbp_new;
if (ipo->ipo_tdb != NULL) {
/* gettdbbydst() has already refcounted tdb */
TAILQ_INSERT_TAIL(
&ipo->ipo_tdb->tdb_policy_head,
ipo, ipo_tdb_next);
- return ipsp_spd_inp(m, inp, ipo, tdbout);
+ error = ipsp_spd_inp(m, inp, ipo, tdbout);
+ mtx_leave(&ipo_tdb_mtx);
+ return error;
}
}
+ mtx_leave(&ipo_tdb_mtx);
/* So, we don't have an SA -- just a policy. */
switch (ipo->ipo_type) {
tdbp = tdbp->tdb_inext;
/* Direct match in the cache. */
- if (ipo->ipo_tdb == tdbp)
- return ipsp_spd_inp(m, inp, ipo, tdbout);
+ mtx_enter(&ipo_tdb_mtx);
+ if (ipo->ipo_tdb == tdbp) {
+ error = ipsp_spd_inp(m, inp, ipo, tdbout);
+ mtx_leave(&ipo_tdb_mtx);
+ return error;
+ }
+ mtx_leave(&ipo_tdb_mtx);
if (memcmp(dignore ? &ssrc : &ipo->ipo_dst,
&tdbp->tdb_src, tdbp->tdb_src.sa.sa_len) ||
goto nomatchin;
/* Add it to the cache. */
+ mtx_enter(&ipo_tdb_mtx);
if (ipo->ipo_tdb != NULL) {
TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head,
ipo, ipo_tdb_next);
ipo->ipo_tdb = tdb_ref(tdbp);
TAILQ_INSERT_TAIL(&tdbp->tdb_policy_head, ipo,
ipo_tdb_next);
- return ipsp_spd_inp(m, inp, ipo, tdbout);
+ error = ipsp_spd_inp(m, inp, ipo, tdbout);
+ mtx_leave(&ipo_tdb_mtx);
+ return error;
nomatchin: /* Nothing needed here, falling through */
;
}
/* Check whether cached entry applies. */
+ mtx_enter(&ipo_tdb_mtx);
if (ipo->ipo_tdb != NULL) {
/*
* We only need to check that the correct
/* Find whether there exists an appropriate SA. */
if (ipo->ipo_last_searched <= ipsec_last_added) {
+ struct tdb *tdbp_new;
+
if (dignore == 0)
ipo->ipo_last_searched = getuptime();
- ipo->ipo_tdb = gettdbbysrc(rdomain,
+ /* gettdb() takes tdb_sadb_mtx, preserve lock order */
+ mtx_leave(&ipo_tdb_mtx);
+ tdbp_new = gettdbbysrc(rdomain,
dignore ? &ssrc : &ipo->ipo_dst,
ipo->ipo_sproto, ipo->ipo_ids,
&ipo->ipo_addr, &ipo->ipo_mask);
+ mtx_enter(&ipo_tdb_mtx);
+ if (ipo->ipo_tdb != NULL) {
+ /* Remove TDB cached by a parallel thread. */
+ TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head,
+ ipo, ipo_tdb_next);
+ tdb_unref(ipo->ipo_tdb);
+ }
+ ipo->ipo_tdb = tdbp_new;
if (ipo->ipo_tdb != NULL) {
/* gettdbbysrc() has already refcounted tdb */
TAILQ_INSERT_TAIL(
}
}
skipinputsearch:
+ mtx_leave(&ipo_tdb_mtx);
switch (ipo->ipo_type) {
case IPSP_IPSEC_REQUIRE:
rn_delete(&ipo->ipo_addr, &ipo->ipo_mask, rnh, rn) == NULL)
return (ESRCH);
+ mtx_enter(&ipo_tdb_mtx);
if (ipo->ipo_tdb != NULL) {
TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,
ipo_tdb_next);
tdb_unref(ipo->ipo_tdb);
ipo->ipo_tdb = NULL;
}
+ mtx_leave(&ipo_tdb_mtx);
while ((ipa = TAILQ_FIRST(&ipo->ipo_acquires)) != NULL)
ipsp_delete_acquire(ipa);
justreturn:
if (tdbout != NULL) {
- if (ipo != NULL) {
- tdb_ref(ipo->ipo_tdb);
- *tdbout = ipo->ipo_tdb;
- } else
+ if (ipo != NULL)
+ *tdbout = tdb_ref(ipo->ipo_tdb);
+ else
*tdbout = NULL;
}
return 0;