COMMON_OBJ=answer.o axfr.o ixfr.o ixfrcreate.o buffer.o configlexer.o configparser.o dname.o dns.o edns.o iterated_hash.o lookup3.o namedb.o nsec3.o options.o packet.o query.o rbtree.o radtree.o rdata.o region-allocator.o rrl.o siphash.o tsig.o tsig-openssl.o udb.o udbradtree.o udbzone.o util.o bitset.o popen3.o
XFRD_OBJ=xfrd-disk.o xfrd-notify.o xfrd-tcp.o xfrd.o remote.o $(DNSTAP_OBJ)
-NSD_OBJ=$(COMMON_OBJ) $(XFRD_OBJ) difffile.o ipc.o mini_event.o netio.o nsd.o server.o dbaccess.o dbcreate.o zlexer.o zonec.o zparser.o
+NSD_OBJ=$(COMMON_OBJ) $(XFRD_OBJ) difffile.o ipc.o mini_event.o netio.o nsd.o server.o dbaccess.o dbcreate.o zlexer.o zonec.o zparser.o verify.o
ALL_OBJ=$(NSD_OBJ) nsd-checkconf.o nsd-checkzone.o nsd-control.o nsd-mem.o xfr-inspect.o
NSD_CHECKCONF_OBJ=$(COMMON_OBJ) nsd-checkconf.o
-NSD_CHECKZONE_OBJ=$(COMMON_OBJ) $(XFRD_OBJ) dbaccess.o dbcreate.o difffile.o ipc.o mini_event.o netio.o server.o zonec.o zparser.o zlexer.o nsd-checkzone.o
+NSD_CHECKZONE_OBJ=$(COMMON_OBJ) $(XFRD_OBJ) dbaccess.o dbcreate.o difffile.o ipc.o mini_event.o netio.o server.o zonec.o zparser.o zlexer.o nsd-checkzone.o verify.o
NSD_CONTROL_OBJ=$(COMMON_OBJ) nsd-control.o
-CUTEST_OBJ=$(COMMON_OBJ) $(XFRD_OBJ) dbaccess.o dbcreate.o difffile.o ipc.o mini_event.o netio.o server.o zonec.o zparser.o zlexer.o cutest_dname.o cutest_dns.o cutest_iterated_hash.o cutest_run.o cutest_radtree.o cutest_rbtree.o cutest_namedb.o cutest_options.o cutest_region.o cutest_rrl.o cutest_udb.o cutest_udbrad.o cutest_util.o cutest_bitset.o cutest_popen3.o cutest_iter.o cutest_event.o cutest.o qtest.o
-NSD_MEM_OBJ=$(COMMON_OBJ) $(XFRD_OBJ) dbaccess.o dbcreate.o difffile.o ipc.o mini_event.o netio.o server.o zonec.o zparser.o zlexer.o nsd-mem.o
+CUTEST_OBJ=$(COMMON_OBJ) $(XFRD_OBJ) dbaccess.o dbcreate.o difffile.o ipc.o mini_event.o netio.o server.o verify.o zonec.o zparser.o zlexer.o cutest_dname.o cutest_dns.o cutest_iterated_hash.o cutest_run.o cutest_radtree.o cutest_rbtree.o cutest_namedb.o cutest_options.o cutest_region.o cutest_rrl.o cutest_udb.o cutest_udbrad.o cutest_util.o cutest_bitset.o cutest_popen3.o cutest_iter.o cutest_event.o cutest.o qtest.o
+NSD_MEM_OBJ=$(COMMON_OBJ) $(XFRD_OBJ) dbaccess.o dbcreate.o difffile.o ipc.o mini_event.o netio.o verify.o server.o zonec.o zparser.o zlexer.o nsd-mem.o
all: $(TARGETS) $(MANUALS)
$(ALL_OBJ):
server.o: $(srcdir)/server.c config.h $(srcdir)/axfr.h $(srcdir)/nsd.h $(srcdir)/dns.h $(srcdir)/edns.h $(srcdir)/buffer.h \
$(srcdir)/region-allocator.h $(srcdir)/util.h $(srcdir)/query.h $(srcdir)/namedb.h $(srcdir)/dname.h $(srcdir)/radtree.h $(srcdir)/rbtree.h \
$(srcdir)/packet.h $(srcdir)/tsig.h $(srcdir)/netio.h $(srcdir)/xfrd.h $(srcdir)/options.h $(srcdir)/xfrd-tcp.h $(srcdir)/xfrd-disk.h \
- $(srcdir)/difffile.h $(srcdir)/udb.h $(srcdir)/nsec3.h $(srcdir)/ipc.h $(srcdir)/remote.h $(srcdir)/lookup3.h $(srcdir)/dnstap/dnstap_collector.h $(srcdir)/rrl.h $(srcdir)/ixfr.h
+ $(srcdir)/difffile.h $(srcdir)/udb.h $(srcdir)/nsec3.h $(srcdir)/ipc.h $(srcdir)/remote.h $(srcdir)/lookup3.h $(srcdir)/dnstap/dnstap_collector.h $(srcdir)/rrl.h $(srcdir)/ixfr.h $(srcdir)/verify.h
siphash.o: $(srcdir)/siphash.c
tsig.o: $(srcdir)/tsig.c config.h $(srcdir)/tsig.h $(srcdir)/buffer.h $(srcdir)/region-allocator.h $(srcdir)/util.h $(srcdir)/dname.h \
$(srcdir)/tsig-openssl.h $(srcdir)/dns.h $(srcdir)/packet.h $(srcdir)/namedb.h $(srcdir)/radtree.h $(srcdir)/rbtree.h $(srcdir)/query.h $(srcdir)/nsd.h \
util.o: $(srcdir)/util.c config.h $(srcdir)/util.h $(srcdir)/region-allocator.h $(srcdir)/dname.h $(srcdir)/buffer.h \
$(srcdir)/namedb.h $(srcdir)/dns.h $(srcdir)/radtree.h $(srcdir)/rbtree.h $(srcdir)/rdata.h $(srcdir)/zonec.h
bitset.o: $(srcdir)/bitset.c $(srcdir)/bitset.h
+verify.o: $(srcdir)/verify.c config.h $(srcdir)/region-allocator.h $(srcdir)/namedb.h $(srcdir)/dname.h $(srcdir)/buffer.h \
+ $(srcdir)/util.h config.h $(srcdir)/dns.h $(srcdir)/rbtree.h $(srcdir)/nsd.h $(srcdir)/edns.h $(srcdir)/options.h $(srcdir)/difffile.h \
+ $(srcdir)/netio.h $(srcdir)/verify.h
xfrd.o: $(srcdir)/xfrd.c config.h $(srcdir)/xfrd.h $(srcdir)/rbtree.h $(srcdir)/region-allocator.h $(srcdir)/namedb.h \
$(srcdir)/dname.h $(srcdir)/buffer.h $(srcdir)/util.h $(srcdir)/dns.h $(srcdir)/radtree.h $(srcdir)/options.h $(srcdir)/tsig.h $(srcdir)/xfrd-tcp.h \
$(srcdir)/xfrd-disk.h $(srcdir)/xfrd-notify.h $(srcdir)/netio.h $(srcdir)/nsd.h $(srcdir)/edns.h $(srcdir)/packet.h $(srcdir)/rdata.h \
/* Define this to enable per-zone statistics gathering. */
#undef USE_ZONE_STATS
+/* Define to the default zone verification udp port. */
+#undef VERIFY_PORT
+
/* Define to the NSD version to answer version.server query. */
#undef VERSION
dnstap-log-auth-response-messages{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_DNSTAP_LOG_AUTH_RESPONSE_MESSAGES; }
log-time-ascii{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_LOG_TIME_ASCII;}
round-robin{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_ROUND_ROBIN;}
-minimal-responses{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_MINIMAL_RESPONSES;}
-confine-to-zone{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_CONFINE_TO_ZONE;}
+minimal-responses{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_MINIMAL_RESPONSES;}
+confine-to-zone{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_CONFINE_TO_ZONE;}
refuse-any{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_REFUSE_ANY;}
max-refresh-time{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_MAX_REFRESH_TIME;}
min-refresh-time{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_MIN_REFRESH_TIME;}
cookie-secret-file{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_COOKIE_SECRET_FILE;}
xfrd-tcp-max{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_XFRD_TCP_MAX;}
xfrd-tcp-pipeline{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_XFRD_TCP_PIPELINE;}
+verify{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_VERIFY; }
+enable{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_ENABLE; }
+verify-zone{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_VERIFY_ZONE; }
+verify-zones{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_VERIFY_ZONES; }
+verifier{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_VERIFIER; }
+verifier-count{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_VERIFIER_COUNT; }
+verifier-feed-zone{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_VERIFIER_FEED_ZONE; }
+verifier-timeout{COLON} { LEXOUT(("v(%s) ", yytext)); return VAR_VERIFIER_TIMEOUT; }
{NEWLINE} { LEXOUT(("NL\n")); cfg_parser->line++;}
servers={UNQUOTEDLETTER}* {
static int parse_expire_expr(const char *str, long long *num, uint8_t *expr);
static int parse_number(const char *str, long long *num);
static int parse_range(const char *str, long long *low, long long *high);
+
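+/* Single argument of a parsed command line (linked list node). */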
+struct component {
+ struct component *next;
+ char *str;
+};
+
%}
%union {
struct ip_address_option *ip;
struct range_option *range;
struct cpu_option *cpu;
+ char **strv;
+ struct component *comp;
}
%token <str> STRING
%type <ip> ip_address
%type <llng> service_cpu_affinity
%type <cpu> cpus
+%type <strv> command
+%type <comp> arguments
/* server */
%token VAR_SERVER
%token VAR_BINDTODEVICE
%token VAR_SETFIB
+/* verify */
+%token VAR_VERIFY
+%token VAR_ENABLE
+%token VAR_VERIFY_ZONE
+%token VAR_VERIFY_ZONES
+%token VAR_VERIFIER
+%token VAR_VERIFIER_COUNT
+%token VAR_VERIFIER_FEED_ZONE
+%token VAR_VERIFIER_TIMEOUT
+
%%
blocks:
| key
| tls_auth
| pattern
- | zone ;
+ | zone
+ | verify ;
server:
VAR_SERVER server_block ;
{
cfg_parser->pattern->create_ixfr = $2;
cfg_parser->pattern->create_ixfr_is_default = 0;
+ }
+ | VAR_VERIFY_ZONE boolean
+ { cfg_parser->pattern->verify_zone = $2; }
+ | VAR_VERIFIER command
+ { cfg_parser->pattern->verifier = $2; }
+ | VAR_VERIFIER_FEED_ZONE boolean
+ { cfg_parser->pattern->verifier_feed_zone = $2; }
+ | VAR_VERIFIER_TIMEOUT number
+ { cfg_parser->pattern->verifier_timeout = $2; } ;
+
+verify:
+ VAR_VERIFY verify_block ;
+
+verify_block:
+ verify_block verify_option | ;
+
+verify_option:
+ VAR_ENABLE boolean
+ { cfg_parser->opt->verify_enable = $2; }
+ | VAR_IP_ADDRESS ip_address
+ {
+ struct ip_address_option *ip = cfg_parser->opt->verify_ip_addresses;
+ if(!ip) {
+ cfg_parser->opt->verify_ip_addresses = $2;
+ } else {
+ while(ip->next) { ip = ip->next; }
+ ip->next = $2;
+ }
+ }
+ | VAR_PORT number
+ {
+ /* port number, stored as a string */
+ char buf[16];
+ (void)snprintf(buf, sizeof(buf), "%lld", $2);
+ cfg_parser->opt->verify_port = region_strdup(cfg_parser->opt->region, buf);
+ }
+ | VAR_VERIFY_ZONES boolean
+ { cfg_parser->opt->verify_zones = $2; }
+ | VAR_VERIFIER command
+ { cfg_parser->opt->verifier = $2; }
+ | VAR_VERIFIER_COUNT number
+ { cfg_parser->opt->verifier_count = (int)$2; }
+ | VAR_VERIFIER_TIMEOUT number
+ { cfg_parser->opt->verifier_timeout = (int)$2; }
+ | VAR_VERIFIER_FEED_ZONE boolean
+ { cfg_parser->opt->verifier_feed_zone = $2; } ;
+
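+/* A command is parsed into a NULL-terminated, argv-style vector of strings. */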
+command:
+ STRING arguments
+ {
+ char **argv;
+ size_t argc = 1;
+ for(struct component *i = $2; i; i = i->next) {
+ argc++;
+ }
+ argv = region_alloc_zero(
+ cfg_parser->opt->region, (argc + 1) * sizeof(char *));
+ argc = 0;
+ argv[argc++] = $1;
+ for(struct component *j, *i = $2; i; i = j) {
+ j = i->next;
+ argv[argc++] = i->str;
+ region_recycle(cfg_parser->opt->region, i, sizeof(*i));
+ }
+ $$ = argv;
+ } ;
+
+arguments:
+ { $$ = NULL; }
+ | arguments STRING
+ {
+ struct component *comp = region_alloc_zero(
+ cfg_parser->opt->region, sizeof(*comp));
+ comp->str = region_strdup(cfg_parser->opt->region, $2);
+ if($1) {
+ struct component *tail = $1;
+ while(tail->next) {
+ tail = tail->next;
+ }
+ tail->next = comp;
+ $$ = $1;
+ } else {
+ $$ = comp;
+ }
} ;
ip_address:
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for NSD 4.5.0.
+# Generated by GNU Autoconf 2.69 for NSD 4.6.0.
#
# Report bugs to <nsd-bugs@nlnetlabs.nl>.
#
# Identity of this package.
PACKAGE_NAME='NSD'
PACKAGE_TARNAME='nsd'
-PACKAGE_VERSION='4.5.0'
-PACKAGE_STRING='NSD 4.5.0'
+PACKAGE_VERSION='4.6.0'
+PACKAGE_STRING='NSD 4.6.0'
PACKAGE_BUGREPORT='nsd-bugs@nlnetlabs.nl'
PACKAGE_URL=''
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures NSD 4.5.0 to adapt to many kinds of systems.
+\`configure' configures NSD 4.6.0 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of NSD 4.5.0:";;
+ short | recursive ) echo "Configuration of NSD 4.6.0:";;
esac
cat <<\_ACEOF
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-NSD configure 4.5.0
+NSD configure 4.6.0
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by NSD $as_me 4.5.0, which was
+It was created by NSD $as_me 4.6.0, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
_ACEOF
+cat >>confdefs.h <<_ACEOF
+#define VERIFY_PORT "5347"
+_ACEOF
+
+
facility=LOG_DAEMON
# Check whether --with-facility was given.
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by NSD $as_me 4.5.0, which was
+This file was extended by NSD $as_me 4.6.0, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-NSD config.status 4.5.0
+NSD config.status 4.6.0
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
sinclude(acx_nlnetlabs.m4)
sinclude(dnstap/dnstap.m4)
-AC_INIT([NSD],[4.5.0],[nsd-bugs@nlnetlabs.nl])
+AC_INIT([NSD],[4.6.0],[nsd-bugs@nlnetlabs.nl])
AC_CONFIG_HEADERS([config.h])
#
AC_DEFINE_UNQUOTED([MAXSYSLOGMSGLEN], [512], [Define to the maximum message length to pass to syslog.])
AC_DEFINE_UNQUOTED([NSD_CONTROL_PORT], [8952], [Define to the default nsd-control port.])
AC_DEFINE_UNQUOTED([NSD_CONTROL_VERSION], [1], [Define to nsd-control proto version.])
+AC_DEFINE_UNQUOTED([VERIFY_PORT], ["5347"], [Define to the default zone verification udp port.])
dnl
dnl Determine the syslog facility to use
zone->zonestatid = 0;
zone->is_secure = 0;
zone->is_changed = 0;
+ zone->is_updated = 0;
+ zone->is_skipped = 0;
+ zone->is_checked = 0;
+ zone->is_bad = 0;
zone->is_ok = 1;
return zone;
}
fclose(df);
}
+void
+diff_update_commit(
+ const char* zone, uint8_t commit, struct nsd* nsd, uint64_t filenumber)
+{
+ FILE *df;
+
+ assert(zone != NULL);
+ assert(nsd != NULL);
+ assert(commit == DIFF_NOT_COMMITTED ||
+ commit == DIFF_COMMITTED ||
+ commit == DIFF_CORRUPT ||
+ commit == DIFF_INCONSISTENT ||
+ commit == DIFF_VERIFIED);
+
+ df = xfrd_open_xfrfile(nsd, filenumber, "r+");
+ if(!df) {
+ log_msg(LOG_ERR, "could not open transfer %s file %lld: %s",
+ zone, (long long)filenumber, strerror(errno));
+ return;
+ }
+ if(!write_32(df, DIFF_PART_XFRF) || !write_8(df, commit)) {
+ log_msg(LOG_ERR, "could not write transfer %s file %lld: %s",
+ zone, (long long)filenumber, strerror(errno));
+ fclose(df);
+ return;
+ }
+ fflush(df);
+ fclose(df);
+}
+
int
diff_read_64(FILE *in, uint64_t* result)
{
int
diff_read_8(FILE *in, uint8_t* result)
{
- if (fread(result, sizeof(*result), 1, in) == 1) {
- return 1;
- } else {
- return 0;
- }
+ if (fread(result, sizeof(*result), 1, in) == 1) {
+ return 1;
+ } else {
+ return 0;
+ }
}
int
return i;
}
}
- /* this is odd. Log why rr cannot be found. */
+ /* this is odd. Log why rr cannot be found. */
if (!add) {
debug_find_rr_num(rrset, type, klass, rdatas, rdata_num);
}
uint8_t committed;
uint32_t i;
int num_bytes = 0;
+ (void)last_task;
assert(zonedb);
/* read zone name and serial */
zone_buf, domain_to_string(zonedb->apex));
return 0;
}
- if(!committed) {
+ switch(committed) {
+ case DIFF_NOT_COMMITTED:
log_msg(LOG_ERR, "diff file %s was not committed", zone_buf);
return 0;
+ case DIFF_CORRUPT:
+ log_msg(LOG_ERR, "diff file %s was corrupt", zone_buf);
+ return 0;
+ case DIFF_INCONSISTENT:
+ log_msg(LOG_ERR, "diff file %s was inconsistent", zone_buf);
+ return 0;
+ case DIFF_VERIFIED:
+ log_msg(LOG_INFO, "diff file %s already verified", zone_buf);
+ break;
+ default:
+ break;
}
if(num_parts == 0) {
log_msg(LOG_ERR, "diff file %s was not completed", zone_buf);
return 1;
}
- if(committed)
+ if(!zonedb->is_skipped)
{
int is_axfr=0, delete_mode=0, rr_count=0, softfail=0;
const dname_type* apex = domain_dname_const(zonedb->apex);
memset(&z, 0, sizeof(z)); /* if udb==NULL, have &z defined */
if(nsd->db->udb) {
if(udb_base_get_userflags(nsd->db->udb) != 0) {
+ diff_update_commit(
+ zone_buf, DIFF_CORRUPT, nsd, xfrfilenr);
log_msg(LOG_ERR, "database corrupted, cannot update");
- xfrd_unlink_xfrfile(nsd, xfrfilenr);
exit(1);
}
/* all parts were checked by xfrd before commit */
assert(zonedb);
if(ret == 0) {
log_msg(LOG_ERR, "bad ixfr packet part %d in diff file for %s", (int)i, zone_buf);
- xfrd_unlink_xfrfile(nsd, xfrfilenr);
+ diff_update_commit(
+ zone_buf, DIFF_CORRUPT, nsd, xfrfilenr);
/* the udb is still dirty, it is bad */
exit(1);
} else if(ret == 2) {
if(zonedb) prehash_zone(nsd->db, zonedb);
#endif /* NSEC3 */
zonedb->is_changed = 1;
+ zonedb->is_updated = 1;
+ zonedb->is_checked = (committed == DIFF_VERIFIED);
if(nsd->db->udb) {
assert(z.base);
ZONE(&z)->is_changed = 1;
+ /* FIXME: need to set is_updated here? */
ZONE(&z)->mtime = time_end_0;
ZONE(&z)->mtime_nsec = time_end_1*1000;
udb_zone_set_log_str(nsd->db->udb, &z, log_buf);
"Zone %s contents is different from master, "
"starting AXFR. Transfer %s", zone_buf, log_buf);
/* add/del failures in IXFR, get an AXFR */
- task_new_soainfo(taskudb, last_task, zonedb, 1);
- } else {
- if(taskudb)
- task_new_soainfo(taskudb, last_task, zonedb, 0);
+ diff_update_commit(
+ zone_buf, DIFF_INCONSISTENT, nsd, xfrfilenr);
+ exit(1);
}
if(ixfr_store)
ixfr_store_finish(ixfr_store, nsd, log_buf);
struct udb_base* task_file_create(const char* file)
{
- return udb_base_create_new(file, &namedb_walkfunc, NULL);
+ return udb_base_create_new(file, &namedb_walkfunc, NULL);
}
static int
}
void task_new_soainfo(struct udb_base* udb, udb_ptr* last, struct zone* z,
- int gone)
+ enum soainfo_hint hint)
{
/* calculate size */
udb_ptr e;
domain_to_string(z->apex)));
apex = domain_dname(z->apex);
sz = sizeof(struct task_list_d) + dname_total_size(apex);
- if(z->soa_rrset && !gone) {
+ if(z->soa_rrset && hint == soainfo_ok) {
ns = domain_dname(rdata_atom_domain(
z->soa_rrset->rrs[0].rdatas[0]));
em = domain_dname(rdata_atom_domain(
return;
}
TASKLIST(&e)->task_type = task_soa_info;
+ TASKLIST(&e)->yesno = (uint64_t)hint;
- if(z->soa_rrset && !gone) {
+ if(z->soa_rrset && hint == soainfo_ok) {
uint32_t ttl = htonl(z->soa_rrset->rrs[0].ttl);
uint8_t* p = (uint8_t*)TASKLIST(&e)->zname;
p += dname_total_size(apex);
if(!zone) {
/* assume the zone has been deleted and a zone transfer was
* still waiting to be processed */
- xfrd_unlink_xfrfile(nsd, TASKLIST(task)->yesno);
return;
}
+
/* apply the XFR */
/* oldserial, newserial, yesno is filenumber */
df = xfrd_open_xfrfile(nsd, TASKLIST(task)->yesno, "r");
if(!df) {
/* could not open file to update */
- /* there is no reply to xfrd failed-update,
- * because xfrd has a scan for apply-failures. */
- xfrd_unlink_xfrfile(nsd, TASKLIST(task)->yesno);
+ /* soainfo_gone will be communicated from server_reload, unless
+ preceding updates have been applied */
+ zone->is_skipped = 1;
return;
}
/* read and apply zone transfer */
if(!apply_ixfr_for_zone(nsd, zone, df, nsd->options, udb,
last_task, TASKLIST(task)->yesno)) {
- /* there is no reply to xfrd failed-update,
- * because xfrd has a scan for apply-failures. */
+ /* soainfo_gone will be communicated from server_reload, unless
+ preceding updates have been applied */
+ zone->is_skipped = 1;
}
fclose(df);
- xfrd_unlink_xfrfile(nsd, TASKLIST(task)->yesno);
}
#define DIFF_PART_XXFR ('X'<<24 | 'X'<<16 | 'F'<<8 | 'R')
#define DIFF_PART_XFRF ('X'<<24 | 'F'<<16 | 'R'<<8 | 'F')
+#define DIFF_NOT_COMMITTED (0u) /* XFR not (yet) committed to disk */
+#define DIFF_COMMITTED (1u<<0) /* XFR committed to disk */
+#define DIFF_CORRUPT (1u<<1) /* XFR corrupt */
+#define DIFF_INCONSISTENT (1u<<2) /* IXFR cannot be applied */
+#define DIFF_VERIFIED (1u<<3) /* XFR already verified */
+
/* write an xfr packet data to the diff file, type=IXFR.
The diff file is created if necessary, with initial header(notcommitted). */
void diff_write_packet(const char* zone, const char* pat, uint32_t old_serial,
uint32_t new_serial, uint32_t num_parts, uint8_t commit,
const char* log_msg, struct nsd* nsd, uint64_t filenumber);
+/*
+ * Overwrite committed value of diff file with discarded to ensure diff
+ * file is not reapplied on reload.
+ */
+void diff_update_commit(const char* zone,
+ uint8_t commit, struct nsd* nsd, uint64_t filenumber);
+
/*
* These functions read parts of the diff file.
*/
buffer_type* packet, size_t rdatalen, zone_type *zone,
struct udb_ptr* udbz, int* softfail);
+enum soainfo_hint {
+ soainfo_ok,
+ soainfo_gone,
+ soainfo_bad
+};
+
/* task udb structure */
struct task_list_d {
/** next task in list */
} task_type;
uint32_t size; /* size of this struct */
- /** soainfo: zonename dname, soaRR wireform */
+ /** soainfo: zonename dname, soaRR wireform, yesno is soainfo_hint */
/** expire: zonename, boolyesno */
/** apply_xfr: zonename, serials, yesno is filenamecounter */
uint32_t oldserial, newserial;
void task_remap(udb_base* udb);
void task_process_sync(udb_base* udb);
void task_clear(udb_base* udb);
-void task_new_soainfo(udb_base* udb, udb_ptr* last, struct zone* z, int gone);
+void task_new_soainfo(udb_base* udb, udb_ptr* last, struct zone* z, enum soainfo_hint hint);
void task_new_expire(udb_base* udb, udb_ptr* last,
const struct dname* z, int expired);
void* task_new_stat_info(udb_base* udb, udb_ptr* last, struct nsdst* stat,
+23 June 2022: Wouter
+ - Tag for 4.6.0rc1.
+
+17 June 2022: Wouter
+ - Fix compilation with libev, without event_base_loopbreak.
+
+16 June 2022: Wouter
+ - Fix that the unit test verify_repat cleans up nsd on exit.
+ - Fix to remove ixfrcreate.c asserts about uint16 within limits
+ because of warnings from analyzers.
+
+14 June 2022: Wouter
+ - Fix compilation without libevent and compilation of nsd-mem.
+ - Fix verify handler add of sigchld event for compilation without
+ libevent.
+
+3 June 2022: Wouter
+ - Fix static analyzer reports on ixfrcreate temp file.
+ - Fixup wrong ixfrcreate fread return check.
+
+13 May 2022: Wouter
+ - The code repo continues with version 4.5.1.
+
6 May 2022: Wouter
- Merge PR #209: IXFR out
This adds IXFR out functionality to NSD. NSD can copy IXFRs from
3.5 ... Diagnosing NSD log entries
3.6 ... Interfaces
3.7 ... Tuning
+3.8 ... Zone verification
4.0 Support and Feedback
4.1 ... Your Support
1.0 Introduction
-This is NSD Name Server Daemon (NSD) version 4.5.0.
+This is NSD Name Server Daemon (NSD) version 4.6.0.
The NLnet Labs Name Server Daemon (NSD) is an authoritative RFC compliant
DNS nameserver. It was first conceived to allow for more genetic
1.2 Quick build and install
-Step 1: Unpack the source with gtar -xzvf nsd-4.5.0.tar.gz
+Step 1: Unpack the source with gtar -xzvf nsd-4.6.0.tar.gz
Step 2: Create user nsd or any other unprivileged user of your
choice. In case of later make sure to use
Use your favorite combination of tar and gnu zip to unpack the source,
for example
-$ gtar -xzvf nsd-4.5.0.tar.gz
+$ gtar -xzvf nsd-4.6.0.tar.gz
-will unpack the source into the ./nsd-4.5.0 directory...
+will unpack the source into the ./nsd-4.6.0 directory...
2.2 Configuring NSD
FreeBSD.
+3.8 Zone verification
+
+NSD can be configured to verify a zone is correct before publishing it. This
+feature is primarily aimed at fortifying DNSSEC in the DNS
+notify/transfer-chain, but can be used to carry out any checks desired.
+
+An external verifier can be configured per zone. When a zone with verification
+enabled is received or updated via an (incremental) zone transfer, it will be
+submitted to the verifier for evaluation. If the verifier deems the updated
+zone correct (indicated by exit status 0), the zone will be served. If the
+verifier exits with a non-zero status, NSD will discard the update and
+continue to serve the previous version of the zone.
+
+Verifier options can be configured globally in the "verify:" clause, or
+specifically for a zone/pattern in the respective "zone:" and "pattern:"
+clauses. The global values are applied by default.
+
+The zone can be provided to the verifier in two ways.
+
+ 1. The complete zone can be fed to the standard input of the verifier.
+
+ This modus operandi is enabled by default and can be configured
+ with the "verifier-feed-zone:" option.
+
+ Examples for verifiers that read from the standard input are:
+ "ldns-verify-zone -V2" (-V2 to suppress copying to stdout) or
+ "validns -" (don't forget the dash (-) to read the zone from stdin).
+
+ 2. The zone can be served to the verifier.
+
+ This is disabled by default and can be enabled by configuring ip-
+ addresses, with the "ip-address:" option in the "verify:" clause,
+ on which the zone to be assessed will be served. Addresses can
+ contain a port number to override the default port, which is 5347
+ unless changed with the "port:" option in the "verify:" clause.
+
+ For example, to validate the SOA record of the zone example.com by
+ querying NSD, with a DS record (in the file example.com.ds) as the
+ trust anchor, the "verifier:" option could have the following value:
+ "drill -S -k example.com.ds @localhost -p 5347 example.com SOA"
+
+A verifier is informed, via environment variables, of the domain name of the
+zone to be verified and of the addresses on which that zone can be queried.
+An illustrative verifier script follows the list below.
+
+ VERIFY_ZONE
+ Domain name of the zone to be verified.
+
+ VERIFY_ZONE_ON_STDIN
+ Contains "yes" if the zone is fed over standard input,
+ otherwise "no".
+
+ VERIFY_IP_ADDRESSES
+ Contains a list of <ip-address>@<port>s on which the zone
+ to be verified can be queried.
+
+ VERIFY_IPV6_ADDRESS and VERIFY_IPV6_PORT
+ Contains the first configured IPv6 address and port.
+
+ VERIFY_IPV4_ADDRESS and VERIFY_IPV4_PORT
+ Contains the first configured IPv4 address and port.
+
+ VERIFY_IP_ADDRESS and VERIFY_PORT
+ Contains the first configured address and port.
+ IPv6 is preferred over IPv4.
+
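+As an illustration only, a minimal verifier could be a shell script along the
+following lines. It reads the zone from standard input when it is offered
+there and queries the served zone otherwise; the drill invocation and the
+example.com.ds trust anchor file are just examples, as above.
+
+ #!/bin/sh
+ # Verifier sketch: exit 0 accepts the updated zone, non-zero rejects it.
+ if [ "$VERIFY_ZONE_ON_STDIN" = "yes" ]; then
+     exec ldns-verify-zone -V2
+ fi
+ exec drill -S -k example.com.ds "@$VERIFY_IP_ADDRESS" \
+     -p "$VERIFY_PORT" "$VERIFY_ZONE" SOA
+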
+At most one verifier runs per zone at any given time, but when updates for
+multiple zones await verification, several verifiers may run simultaneously.
+The number of verifiers that may run simultaneously is configured with the
+"verifier-count:" option in the "verify:" clause and defaults to 1.
+
+The time a verifier may take can be configured with the "verifier-timeout:"
+option in the "verify:" clause (to set the global default) or in the "zone:"
+or "pattern:" clause to set it for a specific zone. If a verifier runs longer
+than the timeout, the zone being verified is considered bad. The default value
+is 0, which means that the verifier may take as long as it needs.
+
+To enable verification for all zones:
+
+ verify:
+ enable: yes
+ verifier: <command>
+
+To enable verification only for a specific zone:
+
+ verify:
+ enable: yes
+ verify-zones: no
+
+ zone:
+ name: example.com
+ verify-zone: yes
+
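+To additionally run a dedicated verifier with a timeout for that zone, the
+zone entry can, for example, be extended as follows (the command and the
+timeout value shown are only illustrative):
+
+ zone:
+ name: example.com
+ verify-zone: yes
+ verifier: ldns-verify-zone -V2
+ verifier-timeout: 60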
+
4.0 Support and Feedback
NLnet Labs is committed to support NSD and its other software products on
see http://www.nlnetlabs.nl/labs/contributors/.
-$Id: README,v 1.3 2022/06/30 08:21:37 florian Exp $
+$Id: README,v 1.4 2022/06/30 10:49:39 florian Exp $
NSD RELEASE NOTES
+4.6.0
+================
+FEATURES:
+ - Port zone-verification from CreDNS to NSD4.
+BUG FIXES:
+ - Fix static analyzer reports on ixfrcreate temp file.
+ - Fixup wrong ixfrcreate fread return check.
+
4.5.0
================
FEATURES:
task_remap(xfrd->nsd->task[xfrd->nsd->mytask]);
udb_ptr_init(xfrd->last_task, xfrd->nsd->task[xfrd->nsd->mytask]);
assert(udb_base_get_userdata(xfrd->nsd->task[xfrd->nsd->mytask])->data == 0);
-
- xfrd_prepare_zones_for_reload();
+ if(!xfrd->reload_cmd_first_sent)
+ xfrd->reload_cmd_first_sent = xfrd_time();
xfrd->reload_cmd_last_sent = xfrd_time();
xfrd->need_to_send_reload = 0;
xfrd->can_send_reload = 0;
DEBUG(DEBUG_IPC,1, (LOG_INFO, "xfrd: main sent shutdown cmd."));
xfrd->shutdown = 1;
break;
+ case NSD_RELOAD_FAILED:
+ xfrd->reload_failed = 1;
+ /* fall through */
case NSD_RELOAD_DONE:
/* reload has finished */
- DEBUG(DEBUG_IPC,1, (LOG_INFO, "xfrd: ipc recv RELOAD_DONE"));
+ DEBUG(DEBUG_IPC,1, (LOG_INFO, "xfrd: ipc recv %s",
+ xfrd->reload_failed ? "RELOAD FAILED" : "RELOAD DONE"));
if(block_read(NULL, handler->ev_fd, &xfrd->reload_pid,
sizeof(pid_t), -1) != sizeof(pid_t)) {
log_msg(LOG_ERR, "xfrd cannot get reload_pid");
xfrd->ipc_send_blocked = 0;
ipc_xfrd_set_listening(xfrd, EV_PERSIST|EV_READ|EV_WRITE);
xfrd_reopen_logfile();
- xfrd_check_failed_updates();
+ if(!xfrd->reload_failed) {
+ xfrd_check_failed_updates();
+ xfrd->reload_cmd_first_sent = 0;
+ } else {
+ /* make reload happen again, right away */
+ xfrd_set_reload_now(xfrd);
+ }
+ xfrd_prepare_zones_for_reload();
+ xfrd->reload_failed = 0;
break;
case NSD_PASS_TO_XFRD:
DEBUG(DEBUG_IPC,1, (LOG_INFO, "xfrd: ipc recv PASS_TO_XFRD"));
if(!spool_dname(out, domain_dname(zone->apex))) {
log_msg(LOG_ERR, "could not write %s: %s",
file_name, strerror(errno));
+ fclose(out);
return 0;
}
if(!spool_u32(out, serial)) {
log_msg(LOG_ERR, "could not write %s: %s",
file_name, strerror(errno));
+ fclose(out);
return 0;
}
if(!spool_domains(out, zone)) {
log_msg(LOG_ERR, "could not write %s: %s",
file_name, strerror(errno));
+ fclose(out);
return 0;
}
fclose(out);
/* read uint16_t from spool */
static int read_spool_u16(FILE* spool, uint16_t* val)
{
- if(!fread(val, sizeof(*val), 1, spool))
+ if(fread(val, sizeof(*val), 1, spool) < 1)
return 0;
return 1;
}
/* read uint32_t from spool */
static int read_spool_u32(FILE* spool, uint32_t* val)
{
- if(!fread(val, sizeof(*val), 1, spool))
+ if(fread(val, sizeof(*val), 1, spool) < 1)
return 0;
return 1;
}
size_t* dname_len)
{
uint16_t len;
- if(!fread(&len, sizeof(len), 1, spool))
+ if(fread(&len, sizeof(len), 1, spool) < 1)
return 0;
if(len > buflen) {
log_msg(LOG_ERR, "dname too long");
return 0;
}
if(len > 0) {
- if(!fread(buf, len, 1, spool))
+ if(fread(buf, len, 1, spool) < 1)
return 0;
}
*dname_len = len;
return 0;
}
/* because rdlen is uint16_t always smaller than sizeof(buf)*/
- if(!fread(buf, rdlen, 1, spool)) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wtype-limits"
+ assert(rdlen <= sizeof(buf));
+#pragma GCC diagnostic pop
+ if(fread(buf, rdlen, 1, spool) < 1) {
log_msg(LOG_ERR, "error reading file %s: %s",
ixfrcr->file_name, strerror(errno));
return 0;
return 0;
}
/* because rdlen is uint16_t always smaller than sizeof(buf)*/
- if(!fread(buf, rdlen, 1, spool)) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wtype-limits"
+ assert(rdlen <= sizeof(buf));
+#pragma GCC diagnostic pop
+ if(fread(buf, rdlen, 1, spool) < 1) {
log_msg(LOG_ERR, "error reading file %s: %s",
ixfrcr->file_name, strerror(errno));
return 0;
ixfrcr->file_name, strerror(errno));
return 0;
}
+ if(spool_type_count > sizeof(marktypes)) {
+ log_msg(LOG_ERR, "error reading file %s: spool type count "
+ "too large", ixfrcr->file_name);
+ return 0;
+ }
for(i=0; i<spool_type_count; i++) {
uint16_t tp, kl, rrcount;
struct rrset* rrset;
ixfrcr->file_name, strerror(errno));
return 0;
}
+ /* The rrcount is within limits of sizeof(marktypes), because
+ * the uint16_t < 65536 */
rrset = domain_find_rrset(domain, zone, tp);
if(!rrset) {
/* rrset in spool but not in new zone, deleted RRset */
ixfrcr->file_name, strerror(errno));
return 0;
}
+ if(spool_type_count > 65536) {
+ log_msg(LOG_ERR, "error reading file %s: del RR spool type "
+ "count too large", ixfrcr->file_name);
+ return 0;
+ }
for(i=0; i<spool_type_count; i++) {
uint16_t tp, kl, rrcount;
if(!read_spool_u16(spool, &tp) ||
ixfrcr->file_name, strerror(errno));
return 0;
}
+ /* The rrcount is within reasonable limits, because
+ * the uint16_t < 65536 */
if(!process_spool_delrrset(spool, ixfrcr, store, dname,
dname_len, tp, kl, rrcount))
return 0;
if(!zone->opts->pattern->create_ixfr)
return 0;
/* only if there is a zone in memory to compare with */
- if(!zone || !zone->soa_rrset || !zone->apex)
+ if(!zone->soa_rrset || !zone->apex)
return 0;
old_serial = zone_get_current_serial(zone);
return 0;
}
+int event_base_loopbreak(struct event_base * base)
+{
+ return event_base_loopexit(base, NULL);
+}
+
/* free event base, free events yourself */
void
event_base_free(struct event_base* base)
int event_base_dispatch(struct event_base *);
/** exit that loop */
int event_base_loopexit(struct event_base *, struct timeval *);
+/** exit loop */
+int event_base_loopbreak(struct event_base *);
/** run select once */
#define EVLOOP_ONCE 1
int event_base_loop(struct event_base* base, int flags);
struct timespec mtime; /* time of last modification */
unsigned zonestatid; /* array index for zone stats */
unsigned is_secure : 1; /* zone uses DNSSEC */
- unsigned is_ok : 1; /* zone has not expired. */
- unsigned is_changed : 1; /* zone was changed by AXFR */
+ unsigned is_ok : 1; /* zone has not expired */
+ unsigned is_changed : 1; /* zone changes must be written to disk */
+ unsigned is_updated : 1; /* zone was changed by XFR */
+ unsigned is_skipped : 1; /* subsequent zone updates are skipped */
+ unsigned is_checked : 1; /* zone already verified */
+ unsigned is_bad : 1; /* zone failed verification */
} ATTR_PACKED;
/* a RR in DNS */
-.TH "nsd\-checkconf" "8" "May 13, 2022" "NLnet Labs" "nsd 4.5.0"
+.TH "nsd\-checkconf" "8" "Jun 30, 2022" "NLnet Labs" "nsd 4.6.0"
.\" Copyright (c) 2001\-2008, NLnet Labs. All rights reserved.
.\" See LICENSE for the license.
.SH "NAME"
printf("\tixfr-size: %u\n", (unsigned)pat->ixfr_size);
if(!pat->create_ixfr_is_default)
printf("\tcreate-ixfr: %s\n", pat->create_ixfr?"yes":"no");
+ if(pat->verify_zone != VERIFY_ZONE_INHERIT) {
+ printf("\tverify-zone: ");
+ if(pat->verify_zone) {
+ printf("yes\n");
+ } else {
+ printf("no\n");
+ }
+ }
+ if(pat->verifier) {
+ printf("\tverifier:");
+ for(char *const *s = pat->verifier; *s; s++) {
+ printf(" \"%s\"", *s);
+ }
+ printf("\n");
+ }
+ if(pat->verifier_feed_zone != VERIFIER_FEED_ZONE_INHERIT) {
+ printf("\tverifier-feed-zone: ");
+ if(pat->verifier_feed_zone) {
+ printf("yes\n");
+ } else {
+ printf("no\n");
+ }
+ }
+ if(pat->verifier_timeout != VERIFIER_TIMEOUT_INHERIT) {
+ printf("\tverifier-timeout: %d\n", pat->verifier_timeout);
+ }
}
void
print_string_var("control-key-file:", opt->control_key_file);
print_string_var("control-cert-file:", opt->control_cert_file);
+ printf("\nverify:\n");
+ printf("\tenable: %s\n", opt->verify_enable?"yes":"no");
+ for(ip = opt->verify_ip_addresses; ip; ip=ip->next) {
+ print_string_var("ip-address:", ip->address);
+ }
+ printf("\tport: %s\n", opt->verify_port);
+ printf("\tverify-zones: %s\n", opt->verify_zones?"yes":"no");
+ if(opt->verifier) {
+ printf("\tverifier:");
+ for(char **s = opt->verifier; *s; s++) {
+ printf(" \"%s\"", *s);
+ }
+ printf("\n");
+ }
+ printf("\tverifier-count: %d\n", opt->verifier_count);
+ printf("\tverifier-feed-zone: %s\n", opt->verifier_feed_zone?"yes":"no");
+ printf("\tverifier-timeout: %d\n", opt->verifier_timeout);
+
RBTREE_FOR(key, key_options_type*, opt->keys)
{
printf("\nkey:\n");
-.TH "nsd\-checkzone" "8" "May 13, 2022" "NLnet Labs" "nsd 4.5.0"
+.TH "nsd\-checkzone" "8" "Jun 30, 2022" "NLnet Labs" "nsd 4.6.0"
.\" Copyright (c) 2014, NLnet Labs. All rights reserved.
.\" See LICENSE for the license.
.SH "NAME"
-.TH "nsd\-control" "8" "May 13, 2022" "NLnet Labs" "nsd 4.5.0"
+.TH "nsd\-control" "8" "Jun 30, 2022" "NLnet Labs" "nsd 4.6.0"
.\" Copyright (c) 2011, NLnet Labs. All rights reserved.
.\" See LICENSE for the license.
.SH "NAME"
.TP
.B start
Start the server. Simply execs \fInsd\fR(8). The nsd executable
-is searched for in the \fBPATH\fR set in the environment. It is started
-with the config file specified using \fI\-c\fR or the default config file.
+is not searched for in the \fBPATH\fR set in the environment. Instead, the
+default location relative to the installation prefix specified at
+compile time is used. The executable location can be overridden by setting
+\fINSD_PATH\fR in the environment. It is started with the config file
+specified using \fI\-c\fR or the default config file.
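+For example, if nsd is installed in /usr/local/sbin:
+.IP
+NSD_PATH=/usr/local/sbin/nsd nsd\-control start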
.TP
.B stop
Stop the server. The server daemon exits.
#include "config.h"
#include <stdio.h>
+#include <stdlib.h>
#ifdef HAVE_SSL
#include <sys/types.h>
#include <unistd.h>
if(argc == 0)
usage();
if(argc >= 1 && strcmp(argv[0], "start")==0) {
- if(execl(NSD_START_PATH, "nsd", "-c", cfgfile,
- (char*)NULL) < 0) {
+ const char *path;
+ if((path = getenv("NSD_PATH")) == NULL) {
+ path = NSD_START_PATH;
+ }
+ if(execl(path, "nsd", "-c", cfgfile, (char*)NULL) < 0) {
fprintf(stderr, "could not exec %s: %s\n",
-				NSD_START_PATH, strerror(errno));
+				path, strerror(errno));
exit(1);
-.TH "NSD" "8" "May 13, 2022" "NLnet Labs" "NSD 4.5.0"
+.TH "NSD" "8" "Jun 30, 2022" "NLnet Labs" "NSD 4.6.0"
.\" Copyright (c) 2001\-2008, NLnet Labs. All rights reserved.
.\" See LICENSE for the license.
.SH "NAME"
.B nsd
-\- Name Server Daemon (NSD) version 4.5.0.
+\- Name Server Daemon (NSD) version 4.6.0.
.SH "SYNOPSIS"
.B nsd
.RB [ \-4 ]
exit(0);
}
+static void
+setup_verifier_environment(void)
+{
+ size_t i;
+ int ret, ip4, ip6;
+ char *buf, host[NI_MAXHOST], serv[NI_MAXSERV];
+ size_t size, cnt = 0;
+
+ /* allocate large enough buffer to hold a list of all ip addresses.
+ ((" " + INET6_ADDRSTRLEN + "@" + "65535") * n) + "\0" */
+ size = ((INET6_ADDRSTRLEN + 1 + 5 + 1) * nsd.verify_ifs) + 1;
+ buf = xalloc(size);
+
+ ip4 = ip6 = 0;
+ for(i = 0; i < nsd.verify_ifs; i++) {
+ ret = getnameinfo(
+ (struct sockaddr *)&nsd.verify_udp[i].addr.ai_addr,
+ nsd.verify_udp[i].addr.ai_addrlen,
+ host, sizeof(host), serv, sizeof(serv),
+ NI_NUMERICHOST | NI_NUMERICSERV);
+ if(ret != 0) {
+ log_msg(LOG_ERR, "error in getnameinfo: %s",
+ gai_strerror(ret));
+ continue;
+ }
+ buf[cnt++] = ' ';
+ cnt += strlcpy(&buf[cnt], host, size - cnt);
+ assert(cnt < size);
+ buf[cnt++] = '@';
+ cnt += strlcpy(&buf[cnt], serv, size - cnt);
+ assert(cnt < size);
+#ifdef INET6
+ if (nsd.verify_udp[i].addr.ai_family == AF_INET6 && !ip6) {
+ setenv("VERIFY_IPV6_ADDRESS", host, 1);
+ setenv("VERIFY_IPV6_PORT", serv, 1);
+ setenv("VERIFY_IP_ADDRESS", host, 1);
+ setenv("VERIFY_PORT", serv, 1);
+ ip6 = 1;
+ } else
+#endif
+ if (!ip4) {
+ assert(nsd.verify_udp[i].addr.ai_family == AF_INET);
+ setenv("VERIFY_IPV4_ADDRESS", host, 1);
+ setenv("VERIFY_IPV4_PORT", serv, 1);
+ if (!ip6) {
+ setenv("VERIFY_IP_ADDRESS", host, 1);
+ setenv("VERIFY_PORT", serv, 1);
+ }
+ ip4 = 1;
+ }
+ }
+
+ setenv("VERIFY_IP_ADDRESSES", &buf[1], 1);
+ free(buf);
+}
+
static void
copyaddrinfo(struct nsd_addrinfo *dest, struct addrinfo *src)
{
static void
figure_default_sockets(
struct nsd_socket **udp, struct nsd_socket **tcp, size_t *ifs,
- const char *udp_port, const char *tcp_port,
+ const char *node, const char *udp_port, const char *tcp_port,
const struct addrinfo *hints)
{
size_t i = 0, n = 1;
#ifdef INET6
if(hints->ai_family == AF_UNSPEC) {
/*
- * With IPv6 we'd like to open two separate sockets,
- * one for IPv4 and one for IPv6, both listening to
- * the wildcard address (unless the -4 or -6 flags are
- * specified).
+ * With IPv6 we'd like to open two separate sockets, one for
+ * IPv4 and one for IPv6, both listening to the wildcard
+ * address (unless the -4 or -6 flags are specified).
*
- * However, this is only supported on platforms where
- * we can turn the socket option IPV6_V6ONLY _on_.
- * Otherwise we just listen to a single IPv6 socket
- * and any incoming IPv4 connections will be
- * automatically mapped to our IPv6 socket.
+ * However, this is only supported on platforms where we can
+ * turn the socket option IPV6_V6ONLY _on_. Otherwise we just
+ * listen to a single IPv6 socket and any incoming IPv4
+ * connections will be automatically mapped to our IPv6
+ * socket.
*/
#ifdef IPV6_V6ONLY
int r;
struct addrinfo *addrs[2] = { NULL, NULL };
- if((r = getaddrinfo(NULL, udp_port, &ai[0], &addrs[0])) == 0 &&
- (r = getaddrinfo(NULL, tcp_port, &ai[1], &addrs[1])) == 0)
+ if((r = getaddrinfo(node, udp_port, &ai[0], &addrs[0])) == 0 &&
+ (r = getaddrinfo(node, tcp_port, &ai[1], &addrs[1])) == 0)
{
(*udp)[i].flags |= NSD_SOCKET_IS_OPTIONAL;
(*udp)[i].fib = -1;
#endif /* INET6 */
*ifs = i + 1;
- setup_socket(&(*udp)[i], NULL, udp_port, &ai[0]);
+ setup_socket(&(*udp)[i], node, udp_port, &ai[0]);
figure_socket_servers(&(*udp)[i], NULL);
- setup_socket(&(*tcp)[i], NULL, tcp_port, &ai[1]);
+ setup_socket(&(*tcp)[i], node, tcp_port, &ai[1]);
figure_socket_servers(&(*tcp)[i], NULL);
}
figure_sockets(
struct nsd_socket **udp, struct nsd_socket **tcp, size_t *ifs,
struct ip_address_option *ips,
- const char *udp_port, const char *tcp_port,
+ const char *node, const char *udp_port, const char *tcp_port,
const struct addrinfo *hints)
{
size_t i = 0;
if(!ips) {
figure_default_sockets(
- udp, tcp, ifs, udp_port, tcp_port, hints);
+ udp, tcp, ifs, node, udp_port, tcp_port, hints);
return;
}
struct addrinfo hints;
const char *udp_port = 0;
const char *tcp_port = 0;
+ const char *verify_port = 0;
const char *configfile = CONFIGFILE;
tcp_port = TCP_PORT;
}
}
+ if(nsd.options->verify_port != 0) {
+ verify_port = nsd.options->verify_port;
+ } else {
+ verify_port = VERIFY_PORT;
+ }
#ifdef BIND8_STATS
if(nsd.st.period == 0) {
nsd.st.period = nsd.options->statistics;
resolve_interface_names(nsd.options);
figure_sockets(&nsd.udp, &nsd.tcp, &nsd.ifs,
- nsd.options->ip_addresses, udp_port, tcp_port, &hints);
+ nsd.options->ip_addresses, NULL, udp_port, tcp_port, &hints);
+
+ if(nsd.options->verify_enable) {
+ figure_sockets(&nsd.verify_udp, &nsd.verify_tcp, &nsd.verify_ifs,
+ nsd.options->verify_ip_addresses, "localhost", verify_port, verify_port, &hints);
+ setup_verifier_environment();
+ }
/* Parse the username into uid and gid */
nsd.gid = getgid();
-.TH "nsd.conf" "5" "May 13, 2022" "NLnet Labs" "nsd 4.5.0"
+.TH "nsd.conf" "5" "Jun 30, 2022" "NLnet Labs" "nsd 4.6.0"
.\" Copyright (c) 2001\-2008, NLnet Labs. All rights reserved.
.\" See LICENSE for the license.
.SH "NAME"
# Transfers over TLS (XoT). Default is "" (default verify locations).
# tls-cert-bundle: "path/to/ca-bundle.pem"
+verify:
+ # Enable zone verification. Default is no.
+ # enable: no
+
+ # Port to answer verifier queries on. Default is 5347.
+ # port: 5347
+
+ # Interfaces to bind for zone verification (the default is the localhost
+ # interfaces, usually 127.0.0.1 and ::1). To bind to multiple IP
+ # addresses, list them one by one. Socket options cannot be specified
+ # for verify ip-address options.
+ # ip-address: 127.0.0.1
+ # ip-address: 127.0.0.1@5347
+ # ip-address: ::1
+
+ # Verify zones by default. Default is yes.
+ # verify-zones: yes
+
+ # Command to execute for zone verification.
+ # verifier: ldns-verify-zone
+ # verifier: validns -
+ # verifier: drill -k <keyfile> @127.0.0.1 -p 5347 example.com SOA
+
+ # Maximum number of verifiers to run concurrently. Default is 1.
+ # verifier-count: 1
+
+ # Feed updated zone to verifier over standard input. Default is yes.
+ # verifier-feed-zone: yes
+
+ # Number of seconds before verifier is killed (0 is forever).
+ # verifier-timeout: 0
+
# DNSTAP config section, if compiled with that
# dnstap:
# set this to yes and set one or more of dnstap-log-..-messages to yes.
# (which master to request from first, which slave to notify first).
#include-pattern: "common-masters"
+ # Verify zone before publishing.
+ # Default is value of verify-zones in verify.
+ # verify-zone: yes
+
+ # Command to execute for zone verification.
+ # Default is verifier in verify.
+ # verifier: ldns-verify-zone
+ # verifier: validns -
+ # verifier: drill -k <keyfile> @127.0.0.1 -p 5347 example.com SOA
+
+ # Feed updated zone to verifier over standard input.
+ # Default is value of verifier-feed-zone in verify.
+ # verifier-feed-zone: yes
+
+ # Number of seconds before verifier is killed (0 is forever).
+ # Default is verifier-timeout in verify.
+ # verifier-timeout: 0
# Fixed zone entries. Here you can config zones that cannot be deleted.
# Zones that are dynamically added and deleted are put in the zonelist file.
* port53 is free when all of nsd's processes have exited at shutdown time
*/
#define NSD_QUIT_CHILD 11
+/*
+ * This is the exit code of an nsd "new master" child process to indicate to
+ * the master process that some zones failed verification and that it should
+ * reload again, reprocessing the difffiles. The master process will resend
+ * the command to xfrd so it will not reload from xfrd yet.
+ */
+#define NSD_RELOAD_FAILED 14
#define NSD_SERVER_MAIN 0x0U
#define NSD_SERVER_UDP 0x1U
/* UDP specific configuration (array size ifs) */
struct nsd_socket* udp;
+ /* Interfaces used for zone verification */
+ size_t verify_ifs;
+ struct nsd_socket *verify_tcp;
+ struct nsd_socket *verify_udp;
+
+ struct zone *next_zone_to_verify;
+ size_t verifier_count; /* Number of active verifiers */
+ size_t verifier_limit; /* Maximum number of active verifiers */
+ int verifier_pipe[2]; /* Pipe to trigger verifier exit handler */
+ struct verifier *verifiers;
+
edns_data_type edns_ipv4;
#if defined(INET6)
edns_data_type edns_ipv6;
opt->server_cert_file = CONFIGDIR"/nsd_server.pem";
opt->control_key_file = CONFIGDIR"/nsd_control.key";
opt->control_cert_file = CONFIGDIR"/nsd_control.pem";
+
+ opt->verify_enable = 0;
+ opt->verify_ip_addresses = NULL;
+ opt->verify_port = VERIFY_PORT;
+ opt->verify_zones = 1;
+ opt->verifier = NULL;
+ opt->verifier_count = 1;
+ opt->verifier_feed_zone = 1;
+ opt->verifier_timeout = 0;
+
return opt;
}
p->ixfr_number_is_default = 1;
p->create_ixfr = 0;
p->create_ixfr_is_default = 1;
+ p->verify_zone = VERIFY_ZONE_INHERIT;
+ p->verify_zone_is_default = 1;
+ p->verifier = NULL;
+ p->verifier_feed_zone = VERIFIER_FEED_ZONE_INHERIT;
+ p->verifier_feed_zone_is_default = 1;
+ p->verifier_timeout = VERIFIER_TIMEOUT_INHERIT;
+ p->verifier_timeout_is_default = 1;
+
return p;
}
}
}
+static void
+verifier_delete(region_type* region, char **v)
+{
+ if(v != NULL) {
+ size_t vc = 0;
+ for(vc = 0; v[vc] != NULL; vc++)
+ region_recycle(region, v[vc], strlen(v[vc]) + 1);
+ region_recycle(region, v, (vc + 1) * sizeof(char *));
+ }
+}
+
void
pattern_options_remove(struct nsd_options* opt, const char* name)
{
acl_list_delete(opt->region, p->provide_xfr);
acl_list_delete(opt->region, p->allow_query);
acl_list_delete(opt->region, p->outgoing_interface);
+ verifier_delete(opt->region, p->verifier);
region_recycle(opt->region, p, sizeof(struct pattern_options));
}
}
}
+static void
+copy_changed_verifier(struct nsd_options* opt, char ***ov, char **nv)
+{
+ size_t ovc, nvc;
+ assert(ov != NULL);
+ ovc = nvc = 0;
+ if(nv != NULL) {
+ for(; nv[nvc] != NULL; nvc++) ;
+ } else {
+ verifier_delete(opt->region, *ov);
+ *ov = NULL;
+ return;
+ }
+ if(*ov != NULL) {
+ for(; (*ov)[ovc] != NULL; ovc++) {
+ if(ovc < nvc && strcmp((*ov)[ovc], nv[ovc]) != 0)
+ break;
+ }
+ if(ovc == nvc)
+ return;
+ verifier_delete(opt->region, *ov);
+ *ov = NULL;
+ }
+ *ov = region_alloc(opt->region, (nvc + 1) * sizeof(*nv));
+ for(ovc = 0; nv[ovc] != NULL; ovc++) {
+ (*ov)[ovc] = region_strdup(opt->region, nv[ovc]);
+ }
+ (*ov)[ovc] = NULL;
+ assert(ovc == nvc);
+}
+
static void
copy_pat_fixed(region_type* region, struct pattern_options* orig,
struct pattern_options* p)
orig->ixfr_number_is_default = p->ixfr_number_is_default;
orig->create_ixfr = p->create_ixfr;
orig->create_ixfr_is_default = p->create_ixfr_is_default;
+ orig->verify_zone = p->verify_zone;
+ orig->verify_zone_is_default = p->verify_zone_is_default;
+ orig->verifier_timeout = p->verifier_timeout;
+ orig->verifier_timeout_is_default = p->verifier_timeout_is_default;
+ orig->verifier_feed_zone = p->verifier_feed_zone;
+ orig->verifier_feed_zone_is_default = p->verifier_feed_zone_is_default;
}
void
orig->allow_query = copy_acl_list(opt, p->allow_query);
orig->outgoing_interface = copy_acl_list(opt,
p->outgoing_interface);
+ copy_changed_verifier(opt, &orig->verifier, p->verifier);
nsd_options_insert_pattern(opt, orig);
} else {
/* modify in place so pointers stay valid (and copy
copy_changed_acl(opt, &orig->allow_query, p->allow_query);
copy_changed_acl(opt, &orig->outgoing_interface,
p->outgoing_interface);
+ copy_changed_verifier(opt, &orig->verifier, p->verifier);
}
}
return (struct pattern_options*)rbtree_search(opt->patterns, name);
}
+static int
+pattern_verifiers_equal(const char **vp, const char **vq)
+{
+ size_t vpc, vqc;
+ if(vp == NULL)
+ return vq == NULL;
+ if(vq == NULL)
+ return 0;
+ for(vpc = 0; vp[vpc] != NULL; vpc++) ;
+ for(vqc = 0; vq[vqc] != NULL; vqc++) ;
+ if(vpc != vqc)
+ return 0;
+ for(vpc = 0; vp[vpc] != NULL; vpc++) {
+ assert(vq[vpc] != NULL);
+ if (strcmp(vp[vpc], vq[vpc]) != 0)
+ return 0;
+ }
+ return 1;
+}
+
int
pattern_options_equal(struct pattern_options* p, struct pattern_options* q)
{
if(!booleq(p->ixfr_number_is_default,q->ixfr_number_is_default)) return 0;
if(!booleq(p->create_ixfr,q->create_ixfr)) return 0;
if(!booleq(p->create_ixfr_is_default,q->create_ixfr_is_default)) return 0;
+ if(p->verify_zone != q->verify_zone) return 0;
+ if(!booleq(p->verify_zone_is_default,
+ q->verify_zone_is_default)) return 0;
+ if(!pattern_verifiers_equal((const char **)p->verifier,
+ (const char **)q->verifier)) return 0;
+ if(p->verifier_feed_zone != q->verifier_feed_zone) return 0;
+ if(!booleq(p->verifier_feed_zone_is_default,
+ q->verifier_feed_zone_is_default)) return 0;
+ if(p->verifier_timeout != q->verifier_timeout) return 0;
+ if(!booleq(p->verifier_timeout_is_default,
+ q->verifier_timeout_is_default)) return 0;
return 1;
}
return list;
}
+static void
+marshal_strv(struct buffer* b, char **strv)
+{
+ uint32_t i, n;
+
+ assert(b != NULL);
+
+ if (strv == NULL) {
+ marshal_u32(b, 0);
+ return;
+ }
+ for(n = 0; strv[n]; n++) {
+ /* do nothing */
+ }
+ marshal_u32(b, n);
+ for(i = 0; strv[i] != NULL; i++) {
+ marshal_str(b, strv[i]);
+ }
+ marshal_u8(b, 0);
+}
+
+static char **
+unmarshal_strv(region_type* r, struct buffer* b)
+{
+ uint32_t i, n;
+ char **strv;
+
+ assert(r != NULL);
+ assert(b != NULL);
+
+ if ((n = unmarshal_u32(b)) == 0) {
+ return NULL;
+ }
+ strv = region_alloc_zero(r, (n + 1) * sizeof(char *));
+ for(i = 0; i <= n; i++) {
+ strv[i] = unmarshal_str(r, b);
+ }
+ assert(i == (n + 1));
+ assert(strv[i - 1] == NULL);
+
+ return strv;
+}
+
void
pattern_options_marshal(struct buffer* b, struct pattern_options* p)
{
marshal_u8(b, p->ixfr_number_is_default);
marshal_u8(b, p->create_ixfr);
marshal_u8(b, p->create_ixfr_is_default);
+ marshal_u8(b, p->verify_zone);
+ marshal_u8(b, p->verify_zone_is_default);
+ marshal_strv(b, p->verifier);
+ marshal_u8(b, p->verifier_feed_zone);
+ marshal_u8(b, p->verifier_feed_zone_is_default);
+ marshal_u32(b, p->verifier_timeout);
+ marshal_u8(b, p->verifier_timeout_is_default);
}
struct pattern_options*
p->ixfr_number_is_default = unmarshal_u8(b);
p->create_ixfr = unmarshal_u8(b);
p->create_ixfr_is_default = unmarshal_u8(b);
+ p->verify_zone = unmarshal_u8(b);
+ p->verify_zone_is_default = unmarshal_u8(b);
+ p->verifier = unmarshal_strv(r, b);
+ p->verifier_feed_zone = unmarshal_u8(b);
+ p->verifier_feed_zone_is_default = unmarshal_u8(b);
+ p->verifier_timeout = unmarshal_u32(b);
+ p->verifier_timeout_is_default = unmarshal_u8(b);
return p;
}
copy_and_append_acls(&dest->outgoing_interface, pat->outgoing_interface);
if(pat->multi_master_check)
dest->multi_master_check = pat->multi_master_check;
+
+ if(!pat->verify_zone_is_default) {
+ dest->verify_zone = pat->verify_zone;
+ dest->verify_zone_is_default = 0;
+ }
+ if(!pat->verifier_timeout_is_default) {
+ dest->verifier_timeout = pat->verifier_timeout;
+ dest->verifier_timeout_is_default = 0;
+ }
+ if(!pat->verifier_feed_zone_is_default) {
+ dest->verifier_feed_zone = pat->verifier_feed_zone;
+ dest->verifier_feed_zone_is_default = 0;
+ }
+ if(pat->verifier != NULL) {
+ size_t cnt;
+ char **vec;
+ region_type *region = cfg_parser->opt->region;
+
+ for(cnt = 0; pat->verifier[cnt] != NULL; cnt++) ;
+ vec = region_alloc(region, (cnt + 1) * sizeof(char *));
+ for(cnt = 0; pat->verifier[cnt] != NULL; cnt++) {
+ vec[cnt] = region_strdup(region, pat->verifier[cnt]);
+ }
+ vec[cnt] = NULL;
+ if(dest->verifier != NULL) {
+ size_t size;
+ for(cnt = 0; dest->verifier[cnt] != NULL; cnt++) {
+ size = strlen(dest->verifier[cnt]) + 1;
+ region_recycle(
+ region, dest->verifier[cnt], size);
+ }
+ size = (cnt + 1) * sizeof(char *);
+ region_recycle(region, dest->verifier, size);
+ }
+ dest->verifier = vec;
+ }
}
void
typedef struct tls_auth_options tls_auth_options_type;
typedef struct config_parser_state config_parser_state_type;
+#define VERIFY_ZONE_INHERIT (2)
+#define VERIFIER_FEED_ZONE_INHERIT (2)
+#define VERIFIER_TIMEOUT_INHERIT (-1)
+
/*
* Options global for nsd.
*/
char *cookie_secret;
/** path to cookie secret store */
char const* cookie_secret_file;
+ /** enable verify */
+ int verify_enable;
+ /** list of ip addresses used to serve zones for verification */
+ struct ip_address_option* verify_ip_addresses;
+ /** default port 5347 */
+ char *verify_port;
+ /** verify zones by default */
+ int verify_zones;
+ /** default command to verify zones with */
+ char **verifier;
+ /** maximum number of verifiers that may run simultaneously */
+ int verifier_count;
+ /** whether or not to feed the zone to the verifier over stdin */
+ uint8_t verifier_feed_zone;
+ /** maximum number of seconds that a verifier may take */
+ uint32_t verifier_timeout;
region_type* region;
};
uint8_t ixfr_number_is_default;
uint8_t create_ixfr;
uint8_t create_ixfr_is_default;
+ uint8_t verify_zone;
+ uint8_t verify_zone_is_default;
+ char **verifier;
+ uint8_t verifier_feed_zone;
+ uint8_t verifier_feed_zone_is_default;
+ int32_t verifier_timeout;
+ uint8_t verifier_timeout_is_default;
} ATTR_PACKED;
#define PATTERN_IMPLICIT_MARKER "_implicit_"
}
pid_t popen3(char *const *command,
- FILE **finptr,
- FILE **foutptr,
- FILE **ferrptr)
+ int *fdinptr,
+ int *fdoutptr,
+ int *fderrptr)
{
int err = 0;
int fdin[] = { -1, -1 };
int fdout[] = { -1, -1 };
int fderr[] = { -1, -1 };
int fdsig[] = { -1, -1 };
- FILE *fin, *fout, *ferr;
pid_t pid;
ssize_t discard;
return -1;
}
- fin = fout = ferr = NULL;
-
- if(finptr != NULL && (pipe(fdin) == -1 ||
- (fin = fdopen(fdin[1], "w")) == NULL))
- {
+ if(fdinptr != NULL && pipe(fdin) == -1) {
goto error;
}
- if(foutptr != NULL && (pipe(fdout) == -1 ||
- (fout = fdopen(fdout[0], "r")) == NULL))
- {
+ if(fdoutptr != NULL && pipe(fdout) == -1) {
goto error;
}
- if(ferrptr != NULL && (pipe(fderr) == -1 ||
- (ferr = fdopen(fderr[0], "r")) == NULL))
- {
+ if(fderrptr != NULL && pipe(fderr) == -1) {
goto error;
}
if(pipe(fdsig) == -1 ||
case -1: /* error */
goto error;
case 0: /* child */
- if(ferrptr != NULL) {
+ if(fderrptr != NULL) {
if(dup2(fderr[1], 2) == -1) {
goto error_dup2;
}
} else {
close(2);
}
- if(foutptr != NULL) {
+ if(fdoutptr != NULL) {
if(dup2(fdout[1], 1) == -1) {
goto error_dup2;
}
} else {
close(1);
}
- if(finptr != NULL) {
+ if(fdinptr != NULL) {
if(dup2(fdin[0], 0) == -1) {
goto error_dup2;
}
break;
}
- if(finptr != NULL) {
+ if(fdinptr != NULL) {
close(fdin[0]);
- *finptr = fin;
+ *fdinptr = fdin[1];
}
- if(foutptr != NULL) {
+ if(fdoutptr != NULL) {
close(fdout[1]);
- *foutptr = fout;
+ *fdoutptr = fdout[0];
}
- if(ferrptr != NULL) {
+ if(fderrptr != NULL) {
close(fderr[1]);
- *ferrptr = ferr;
+ *fderrptr = fderr[0];
}
return pid;
error:
err = errno;
- if(fin != NULL) {
- fclose(fin);
- fdin[1] = -1;
- }
- if(fout != NULL) {
- fclose(fout);
- fdout[0] = -1;
- }
- if(ferr != NULL) {
- fclose(ferr);
- fderr[0] = -1;
- }
-
close_pipe(fdin);
close_pipe(fdout);
close_pipe(fderr);
* of the pointers will have been set.
*/
pid_t popen3(char *const *command,
- FILE **finptr,
- FILE **foutptr,
- FILE **ferrptr);
+ int *fdinptr,
+ int *fdoutptr,
+ int *fderrptr);
#endif /* _POPEN3_H_ */
#ifdef USE_DNSTAP
#include "dnstap/dnstap_collector.h"
#endif
+#include "verify.h"
#define RELOAD_SYNC_TIMEOUT 25 /* seconds */
nsd->reuseport = 0;
}
+ /* open server interface ports for verifiers */
+ for(i = 0; i < nsd->verify_ifs; i++) {
+ if(open_udp_socket(nsd, &nsd->verify_udp[i], NULL) == -1 ||
+ open_tcp_socket(nsd, &nsd->verify_tcp[i], NULL) == -1)
+ {
+ return -1;
+ }
+ }
+
return 0;
}
}
#endif /* BIND8_STATS */
+void server_verify(struct nsd *nsd, int cmdsocket);
+
/*
* Reload the database, stop parent, re-fork children and continue.
* as server_main.
int ret;
udb_ptr last_task;
struct sigaction old_sigchld, ign_sigchld;
+ struct radnode* node;
+ zone_type* zone;
+ enum soainfo_hint hint;
/* ignore SIGCHLD from the previous server_main that used this pid */
memset(&ign_sigchld, 0, sizeof(ign_sigchld));
ign_sigchld.sa_handler = SIG_IGN;
server_zonestat_switch(nsd);
#endif
+ if(nsd->options->verify_enable) {
+#ifdef RATELIMIT
+ /* allocate resources for rate limiting. use a slot that is guaranteed
+ not mapped to a file so no persistent data is overwritten */
+ rrl_init(nsd->child_count + 1);
+#endif
+
+ /* spin-up server and execute verifiers for each zone */
+ server_verify(nsd, cmdsocket);
+#ifdef RATELIMIT
+ /* deallocate rate limiting resources */
+ rrl_deinit(nsd->child_count + 1);
+#endif
+ }
+
+ for(node = radix_first(nsd->db->zonetree);
+ node != NULL;
+ node = radix_next(node))
+ {
+ zone = (zone_type *)node->elem;
+ if(zone->is_updated) {
+ if(zone->is_bad) {
+ nsd->mode = NSD_RELOAD_FAILED;
+ hint = soainfo_bad;
+ } else {
+ hint = soainfo_ok;
+ }
+ /* update(s), verified or not, possibly with subsequent
+ skipped update(s). skipped update(s) are picked up
+ by failed update check in xfrd */
+ task_new_soainfo(nsd->task[nsd->mytask], &last_task,
+ zone, hint);
+ } else if(zone->is_skipped) {
+ /* corrupt or inconsistent update without preceding
+ update(s), communicate soainfo_gone */
+ task_new_soainfo(nsd->task[nsd->mytask], &last_task,
+ zone, soainfo_gone);
+ }
+ zone->is_updated = 0;
+ zone->is_skipped = 0;
+ }
+
+ if(nsd->mode == NSD_RELOAD_FAILED) {
+ exit(NSD_RELOAD_FAILED);
+ }
+
/* listen for the signals of failed children again */
sigaction(SIGCHLD, &old_sigchld, NULL);
#ifdef USE_DNSTAP
restart_child_servers(nsd, server_region, netio,
&nsd->xfrd_listener->fd);
} else if (child_pid == reload_pid) {
- sig_atomic_t cmd = NSD_RELOAD_DONE;
+ sig_atomic_t cmd = NSD_RELOAD_FAILED;
pid_t mypid;
log_msg(LOG_WARNING,
"Reload process %d failed with status %d, continuing with old database",
(int) child_pid, status);
reload_pid = -1;
if(reload_listener.fd != -1) close(reload_listener.fd);
+ netio_remove_handler(netio, &reload_listener);
reload_listener.fd = -1;
reload_listener.event_types = NETIO_EVENT_NONE;
task_process_sync(nsd->task[nsd->mytask]);
nsd->restart_children = 0;
}
if(nsd->reload_failed) {
- sig_atomic_t cmd = NSD_RELOAD_DONE;
+ sig_atomic_t cmd = NSD_RELOAD_FAILED;
pid_t mypid;
nsd->reload_failed = 0;
log_msg(LOG_WARNING,
(int) reload_pid);
reload_pid = -1;
if(reload_listener.fd != -1) close(reload_listener.fd);
+ netio_remove_handler(netio, &reload_listener);
reload_listener.fd = -1;
reload_listener.event_types = NETIO_EVENT_NONE;
task_process_sync(nsd->task[nsd->mytask]);
data->event_added = 1;
}
+/*
+ * Serve DNS requests to verifiers (short-lived)
+ */
+void server_verify(struct nsd *nsd, int cmdsocket)
+{
+ size_t size = 0;
+ struct event cmd_event, signal_event, exit_event;
+ struct zone *zone;
+
+ assert(nsd != NULL);
+
+ zone = verify_next_zone(nsd, NULL);
+ if(zone == NULL)
+ return;
+
+ nsd->server_region = region_create(xalloc, free);
+ nsd->event_base = nsd_child_event_base();
+
+ nsd->next_zone_to_verify = zone;
+ nsd->verifier_count = 0;
+ nsd->verifier_limit = nsd->options->verifier_count;
+ size = sizeof(struct verifier) * nsd->verifier_limit;
+ if(pipe(nsd->verifier_pipe) == -1) {
+ log_msg(LOG_ERR, "verify: could not create pipe: %s", strerror(errno));
+ nsd->verifier_pipe[0] = nsd->verifier_pipe[1] = -1;
+ goto fail;
+ }
+ fcntl(nsd->verifier_pipe[0], F_SETFD, FD_CLOEXEC);
+ fcntl(nsd->verifier_pipe[1], F_SETFD, FD_CLOEXEC);
+ nsd->verifiers = region_alloc_zero(nsd->server_region, size);
+
+ for(size_t i = 0; i < nsd->verifier_limit; i++) {
+ nsd->verifiers[i].nsd = nsd;
+ nsd->verifiers[i].zone = NULL;
+ nsd->verifiers[i].pid = -1;
+ nsd->verifiers[i].output_stream.fd = -1;
+ nsd->verifiers[i].output_stream.priority = LOG_INFO;
+ nsd->verifiers[i].error_stream.fd = -1;
+ nsd->verifiers[i].error_stream.priority = LOG_ERR;
+ }
+
+ event_set(&cmd_event, cmdsocket, EV_READ|EV_PERSIST, verify_handle_command, nsd);
+ if(event_base_set(nsd->event_base, &cmd_event) != 0 ||
+ event_add(&cmd_event, NULL) != 0)
+ {
+ log_msg(LOG_ERR, "verify: could not add command event");
+ goto fail;
+ }
+
+ event_set(&signal_event, SIGCHLD, EV_SIGNAL|EV_PERSIST, verify_handle_signal, nsd);
+ if(event_base_set(nsd->event_base, &signal_event) != 0 ||
+ signal_add(&signal_event, NULL) != 0)
+ {
+ log_msg(LOG_ERR, "verify: could not add signal event");
+ goto fail;
+ }
+
+ event_set(&exit_event, nsd->verifier_pipe[0], EV_READ|EV_PERSIST, verify_handle_exit, nsd);
+ if(event_base_set(nsd->event_base, &exit_event) != 0 ||
+ event_add(&exit_event, NULL) != 0)
+ {
+ log_msg(LOG_ERR, "verify: could not add exit event");
+ goto fail;
+ }
+
+ memset(msgs, 0, sizeof(msgs));
+ for (int i = 0; i < NUM_RECV_PER_SELECT; i++) {
+ queries[i] = query_create(nsd->server_region,
+ compressed_dname_offsets,
+ compression_table_size, compressed_dnames);
+ query_reset(queries[i], UDP_MAX_MESSAGE_LEN, 0);
+ iovecs[i].iov_base = buffer_begin(queries[i]->packet);
+ iovecs[i].iov_len = buffer_remaining(queries[i]->packet);
+ msgs[i].msg_hdr.msg_iov = &iovecs[i];
+ msgs[i].msg_hdr.msg_iovlen = 1;
+ msgs[i].msg_hdr.msg_name = &queries[i]->addr;
+ msgs[i].msg_hdr.msg_namelen = queries[i]->addrlen;
+ }
+
+ for (size_t i = 0; i < nsd->verify_ifs; i++) {
+ struct udp_handler_data *data;
+ data = region_alloc_zero(
+ nsd->server_region, sizeof(*data));
+ add_udp_handler(nsd, &nsd->verify_udp[i], data);
+ }
+
+ tcp_accept_handler_count = nsd->verify_ifs;
+ tcp_accept_handlers = region_alloc_array(nsd->server_region,
+ nsd->verify_ifs, sizeof(*tcp_accept_handlers));
+
+ for (size_t i = 0; i < nsd->verify_ifs; i++) {
+ struct tcp_accept_handler_data *data;
+ data = &tcp_accept_handlers[i];
+ memset(data, 0, sizeof(*data));
+ add_tcp_handler(nsd, &nsd->verify_tcp[i], data);
+ }
+
+ while(nsd->next_zone_to_verify != NULL &&
+ nsd->verifier_count < nsd->verifier_limit)
+ {
+ verify_zone(nsd, nsd->next_zone_to_verify);
+ nsd->next_zone_to_verify
+ = verify_next_zone(nsd, nsd->next_zone_to_verify);
+ }
+
+ /* short-lived main loop */
+ event_base_dispatch(nsd->event_base);
+
+ /* remove command, signal and exit event handlers */
+ event_del(&exit_event);
+ event_del(&signal_event);
+ event_del(&cmd_event);
+
+ assert(nsd->next_zone_to_verify == NULL || nsd->mode == NSD_QUIT);
+ assert(nsd->verifier_count == 0 || nsd->mode == NSD_QUIT);
+fail:
+ event_base_free(nsd->event_base);
+ close(nsd->verifier_pipe[0]);
+ close(nsd->verifier_pipe[1]);
+ region_destroy(nsd->server_region);
+
+ nsd->event_base = NULL;
+ nsd->server_region = NULL;
+ nsd->verifier_limit = 0;
+ nsd->verifier_pipe[0] = -1;
+ nsd->verifier_pipe[1] = -1;
+ nsd->verifiers = NULL;
+}
+
/*
* Serve DNS requests.
*/
--- /dev/null
+/*
+ * verify.c -- running verifiers and serving the zone to be verified.
+ *
+ * Copyright (c) 2012-2020, NLnet Labs. All rights reserved.
+ *
+ * See LICENSE for the license.
+ *
+ */
+
+#include "config.h"
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_SYSLOG_H
+#include <syslog.h>
+#endif /* HAVE_SYSLOG_H */
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+
+#include "region-allocator.h"
+#include "namedb.h"
+#include "nsd.h"
+#include "options.h"
+#include "difffile.h"
+#include "verify.h"
+#include "popen3.h"
+
+struct zone *verify_next_zone(struct nsd *nsd, struct zone *zone)
+{
+ int verify;
+ struct radnode *node;
+
+ if(zone != NULL) {
+ node = radix_next(zone->node);
+ } else {
+ node = radix_first(nsd->db->zonetree);
+ }
+
+ while(node != NULL) {
+ zone = (struct zone *)node->elem;
+ verify = zone->opts->pattern->verify_zone;
+ if(verify == VERIFY_ZONE_INHERIT) {
+ verify = nsd->options->verify_zones;
+ }
+ if(verify && zone->is_updated && !zone->is_checked) {
+ return zone;
+ }
+ node = radix_next(node);
+ }
+
+ return NULL;
+}
+
+static inline ssize_t fill_buffer(struct verifier_stream *stream)
+{
+ ssize_t cnt = 0;
+
+ assert(stream);
+ assert(stream->fd != -1);
+ assert(stream->cnt <= LOGBUFSIZE);
+ assert(stream->off <= stream->cnt);
+
+ // move data to start of buffer assuming all complete lines are printed
+ if (stream->off) {
+ size_t len = stream->cnt - stream->off;
+ memmove(stream->buf, stream->buf + stream->off, len);
+ stream->off = 0;
+ stream->cnt = len;
+ stream->buf[stream->cnt] = '\0'; // always null-terminate
+ }
+
+ // read data if space is available
+ cnt = read(stream->fd, stream->buf + stream->cnt, LOGBUFSIZE - stream->cnt);
+ if (cnt > 0)
+ stream->cnt += (size_t)cnt;
+ assert(stream->cnt <= LOGBUFSIZE);
+ assert(stream->off <= stream->cnt);
+ stream->buf[stream->cnt] = '\0'; // always null-terminate
+
+ return cnt;
+}
+
+static inline size_t print_line(struct verifier_stream *stream, int eof)
+{
+ char *eol = NULL;
+ size_t len;
+ const char *fmt;
+
+ if (stream->cnt == 0)
+ return 0;
+ assert(stream->off <= stream->cnt);
+ if (stream->off == stream->cnt)
+ return 0;
+
+ // try to locate natural line break
+ assert(stream->buf[stream->cnt] == '\0');
+ if ((eol = strchr(stream->buf + stream->off, '\n'))) {
+ len = eol - (stream->buf + stream->off);
+ } else {
+ len = stream->cnt - stream->off;
+ }
+
+ assert(len <= (stream->cnt - stream->off));
+ // wait for buffer to contain a full line except on eof
+ if (len < LOGLINELEN && !eol && !eof)
+ return 0;
+
+ if (len > LOGLINELEN) {
+ fmt = stream->cut ? ".. %.*s .." : "%.*s ..";
+ len = LOGLINELEN; // remainder printed next iteration
+ stream->cut = 1;
+ eol = NULL; // newline not consumed yet, keep it for the next line
+ } else {
+ fmt = stream->cut ? ".. %.*s" : "%.*s";
+ stream->cut = 0;
+ }
+ log_msg(stream->priority, fmt, (int)len, stream->buf + stream->off);
+
+ stream->off += len + (eol != NULL);
+ assert(stream->off <= stream->cnt);
+ return len;
+}
+
+/*
+ * Log output the verifier writes to STDOUT and STDERR. Lines longer than
+ * LOGLINELEN are split over multiple log entries. Splits are indicated in
+ * the log with ".." before and/or after the break.
+ */
+static void verify_handle_stream(int fd, short event, void *arg)
+{
+ int eof = 0;
+ ssize_t cnt;
+ struct verifier *verifier;
+ struct verifier_stream *stream;
+
+ assert(event & EV_READ);
+ assert(arg != NULL);
+
+ verifier = (struct verifier *)arg;
+ if (fd == verifier->output_stream.fd) {
+ stream = &verifier->output_stream;
+ } else {
+ assert(fd == verifier->error_stream.fd);
+ stream = &verifier->error_stream;
+ }
+
+ assert(stream);
+ assert(stream->fd != -1);
+
+ do {
+ cnt = fill_buffer(stream);
+ eof = !cnt || (cnt < 0 && errno != EAGAIN && errno != EINTR);
+ while (print_line(stream, eof)) ;
+ } while (cnt > 0);
+
+ if(eof) {
+ event_del(&stream->event);
+ close(stream->fd);
+ stream->fd = -1;
+ }
+}
+
+static void kill_verifier(struct verifier *verifier)
+{
+ assert(verifier != NULL);
+ assert(verifier->zone != NULL);
+
+ if(kill(verifier->pid, SIGTERM) == -1) {
+ log_msg(LOG_ERR, "verify: cannot kill verifier for "
+ "zone %s (pid %d): %s",
+ verifier->zone->opts->name,
+ verifier->pid,
+ strerror(errno));
+ }
+}
+
+static void close_stream(struct verifier *verifier, struct verifier_stream *stream)
+{
+ if (stream->fd == -1)
+ return;
+ verify_handle_stream(stream->fd, EV_READ, verifier);
+ if (stream->fd == -1)
+ return;
+ event_del(&stream->event);
+ close(stream->fd);
+ stream->fd = -1;
+}
+
+static void close_verifier(struct verifier *verifier)
+{
+ /* unregister events and close streams (in that order) */
+ if(verifier->timeout.tv_sec > 0) {
+ event_del(&verifier->timeout_event);
+ verifier->timeout.tv_sec = 0;
+ verifier->timeout.tv_usec = 0;
+ }
+
+ if(verifier->zone_feed.fh != NULL) {
+ event_del(&verifier->zone_feed.event);
+ fclose(verifier->zone_feed.fh);
+ verifier->zone_feed.fh = NULL;
+ region_destroy(verifier->zone_feed.region);
+ }
+
+ close_stream(verifier, &verifier->error_stream);
+ close_stream(verifier, &verifier->output_stream);
+
+ verifier->zone->is_ok = verifier->was_ok;
+ verifier->pid = -1;
+ verifier->zone = NULL;
+}
+
+/*
+ * Feed the zone to the verifier over STDIN as the pipe becomes writable.
+ */
+static void verify_handle_feed(int fd, short event, void *arg)
+{
+ struct verifier *verifier;
+ struct rr *rr;
+
+ (void)fd;
+ assert(event == EV_WRITE);
+ assert(arg != NULL);
+
+ verifier = (struct verifier *)arg;
+ if((rr = zone_rr_iter_next(&verifier->zone_feed.rriter)) != NULL) {
+ print_rr(verifier->zone_feed.fh,
+ verifier->zone_feed.rrprinter,
+ rr,
+ verifier->zone_feed.region,
+ verifier->zone_feed.buffer);
+ } else {
+ event_del(&verifier->zone_feed.event);
+ fclose(verifier->zone_feed.fh);
+ verifier->zone_feed.fh = NULL;
+ region_destroy(verifier->zone_feed.region);
+ }
+}
+
+/*
+ * This handler is called when a verifier-timeout alarm goes off. It marks
+ * the zone bad and kills the verifier; the exit handler reaps the process.
+ */
+void verify_handle_timeout(int fd, short event, void *arg)
+{
+ struct verifier *verifier;
+
+ (void)fd;
+ assert(event & EV_TIMEOUT);
+ assert(arg != NULL);
+
+ verifier = (struct verifier *)arg;
+ verifier->zone->is_bad = 1;
+
+ log_msg(LOG_ERR, "verify: verifier for zone %s (pid %d) timed out",
+ verifier->zone->opts->name, verifier->pid);
+
+ /* kill verifier, process reaped by exit handler */
+ kill_verifier(verifier);
+}
+
+void verify_handle_signal(int sig, short event, void *arg)
+{
+ char buf[1] = { '\0' };
+ struct nsd *nsd;
+
+ assert(sig == SIGCHLD);
+ assert(event & EV_SIGNAL);
+ assert(arg != NULL);
+
+ nsd = (struct nsd *)arg;
+ (void)write(nsd->verifier_pipe[1], buf, sizeof(buf));
+}
+
+/*
+ * Reap the process and update the status of the respective zone based on the
+ * exit code of the verifier. Anything still available on STDOUT and STDERR is
+ * read and written to the log, as it might contain valuable information.
+ *
+ * NOTE: A timeout might have caused the verifier to be terminated.
+ */
+void verify_handle_exit(int fd, short event, void *arg)
+{
+ int wstatus;
+ pid_t pid;
+ struct nsd *nsd;
+ char buf[1];
+
+ assert(event & EV_READ);
+ assert(arg != NULL);
+
+ nsd = (struct nsd *)arg;
+
+ (void)read(fd, buf, sizeof(buf));
+
+ while(((pid = waitpid(-1, &wstatus, WNOHANG)) == -1 && errno == EINTR)
+ || (pid > 0))
+ {
+ struct verifier *verifier = NULL;
+
+ for(size_t i = 0; !verifier && i < nsd->verifier_limit; i++) {
+ if(nsd->verifiers[i].zone != NULL &&
+ nsd->verifiers[i].pid == pid)
+ {
+ verifier = &nsd->verifiers[i];
+ }
+ }
+
+ if(verifier == NULL) {
+ continue;
+ }
+
+ if(!WIFEXITED(wstatus)) {
+ log_msg(LOG_ERR, "verify: verifier for zone %s "
+ "(pid %d) exited abnormally",
+ verifier->zone->opts->name, pid);
+ } else {
+ int priority = LOG_INFO;
+ int status = WEXITSTATUS(wstatus);
+ if(status != 0) {
+ priority = LOG_ERR;
+ verifier->zone->is_bad = 1;
+ }
+ log_msg(priority, "verify: verifier for zone %s "
+ "(pid %d) exited with %d",
+ verifier->zone->opts->name, pid, status);
+ }
+
+ close_verifier(verifier);
+ nsd->verifier_count--;
+ }
+
+ while(nsd->mode == NSD_RUN &&
+ nsd->verifier_count < nsd->verifier_limit &&
+ nsd->next_zone_to_verify != NULL)
+ {
+ verify_zone(nsd, nsd->next_zone_to_verify);
+ nsd->next_zone_to_verify
+ = verify_next_zone(nsd, nsd->next_zone_to_verify);
+ }
+
+ if(nsd->next_zone_to_verify == NULL && nsd->verifier_count == 0) {
+ event_base_loopexit(nsd->event_base, NULL);
+ return;
+ }
+}
+
+/*
+ * The parent may be terminated by an NSD_QUIT signal (e.g. the nsdc stop
+ * command). When a reload server process is running, the parent then sends
+ * an NSD_QUIT command to that server. This handler makes sure that command
+ * is not neglected and that the reload server process exits (gracefully).
+ */
+void
+verify_handle_command(int fd, short event, void *arg)
+{
+ struct nsd *nsd = (struct nsd *)arg;
+ int len;
+ sig_atomic_t mode;
+
+ assert(nsd != NULL);
+ assert(event & (EV_READ
+#ifdef EV_CLOSED
+ | EV_CLOSED
+#endif
+ ));
+
+ if((len = read(fd, &mode, sizeof(mode))) == -1) {
+ log_msg(LOG_ERR, "verify: verify_handle_command: read: %s",
+ strerror(errno));
+ return;
+ } else if(len == 0) {
+ log_msg(LOG_INFO, "verify: command channel closed");
+ mode = NSD_QUIT;
+ } else if(mode != NSD_QUIT) {
+ log_msg(LOG_ERR, "verify: bad command: %d", (int)mode);
+ return;
+ }
+
+ nsd->mode = mode;
+
+ if(nsd->verifier_count == 0) {
+ event_base_loopexit(nsd->event_base, NULL);
+ return; /* exit early if no verifiers are executing */
+ }
+
+ /* kill verifiers, processes reaped elsewhere */
+ for(size_t i = 0; i < nsd->verifier_limit; i++) {
+ if(nsd->verifiers[i].zone != NULL) {
+ kill_verifier(&nsd->verifiers[i]);
+ }
+ }
+}
+
+/*
+ * A verifier is executed for the specified zone (if a verifier is configured
+ * and the zone has not been verified before). If one of the verifiers exits
+ * with non-zero, the zone is marked bad and nsd drops the zone update and
+ * reloads again.
+ */
+void verify_zone(struct nsd *nsd, struct zone *zone)
+{
+ struct verifier *verifier = NULL;
+ int32_t timeout;
+ char **command;
+ FILE *fin;
+ int fdin, fderr, fdout, flags;
+
+ assert(nsd != NULL);
+ assert(nsd->verifier_count < nsd->verifier_limit);
+ assert(zone != NULL);
+
+ fin = NULL;
+ fdin = fdout = fderr = -1;
+
+ /* search for available verifier slot */
+ for(size_t i = 0; i < nsd->verifier_limit && !verifier; i++) {
+ if(nsd->verifiers[i].zone == NULL) {
+ verifier = &nsd->verifiers[i];
+ }
+ }
+
+ assert(verifier != NULL);
+
+ if(zone->opts->pattern->verifier != NULL) {
+ command = zone->opts->pattern->verifier;
+ } else if (nsd->options->verifier != NULL) {
+ command = nsd->options->verifier;
+ } else {
+ log_msg(LOG_ERR, "verify: no verifier for zone %s",
+ zone->opts->name);
+ return;
+ }
+
+ if(zone->opts->pattern->verifier_timeout
+ != VERIFIER_TIMEOUT_INHERIT)
+ {
+ timeout = zone->opts->pattern->verifier_timeout;
+ } else {
+ timeout = nsd->options->verifier_timeout;
+ }
+
+ if(zone->opts->pattern->verifier_feed_zone
+ != VERIFIER_FEED_ZONE_INHERIT)
+ {
+ fdin = zone->opts->pattern->verifier_feed_zone ? -2 : -1;
+ } else {
+ fdin = nsd->options->verifier_feed_zone ? -2 : -1;
+ }
+
+ assert(timeout >= 0);
+
+ setenv("VERIFY_ZONE", zone->opts->name, 1);
+ setenv("VERIFY_ZONE_ON_STDIN", fdin == -2 ? "yes" : "no", 1);
+
+ verifier->pid = popen3(
+ command, fdin == -2 ? &fdin : NULL, &fdout, &fderr);
+ if(verifier->pid == -1) {
+ log_msg(LOG_ERR, "verify: could not start verifier for zone "
+ "%s: %s", zone->opts->name, strerror(errno));
+ goto fail_popen3;
+ }
+ /* record the zone right away so the cleanup paths below can log and
+ kill the verifier for the correct zone */
+ verifier->zone = zone;
+ verifier->was_ok = zone->is_ok;
+ flags = fcntl(fderr, F_GETFL, 0);
+ if (fcntl(fderr, F_SETFL, flags | O_NONBLOCK) == -1) {
+ log_msg(LOG_ERR, "verify: fcntl(stderr, ..., O_NONBLOCK) for "
+ "zone %s: %s",
+ zone->opts->name, strerror(errno));
+ goto fail_fcntl;
+ }
+ flags = fcntl(fdout, F_GETFL, 0);
+ if(fcntl(fdout, F_SETFL, flags | O_NONBLOCK) == -1) {
+ log_msg(LOG_ERR, "verify: fcntl(stdout, ..., O_NONBLOCK) for "
+ "zone %s: %s",
+ zone->opts->name, strerror(errno));
+ goto fail_fcntl;
+ }
+ if (fdin >= 0) {
+ if ((fin = fdopen(fdin, "w")) == NULL) {
+ log_msg(LOG_ERR, "verify: fdopen(stdin, ...) for "
+ "zone %s: %s",
+ zone->opts->name, strerror(errno));
+ goto fail_fcntl;
+ }
+ /* write unbuffered */
+ setbuf(fin, NULL);
+ }
+
+ unsetenv("VERIFY_ZONE");
+ unsetenv("VERIFY_ZONE_ON_STDIN");
+
+ verifier->error_stream.fd = fderr;
+ verifier->error_stream.cnt = 0;
+ verifier->error_stream.off = 0;
+ verifier->error_stream.buf[0] = '\0';
+ event_set(&verifier->error_stream.event,
+ verifier->error_stream.fd,
+ EV_READ|EV_PERSIST,
+ verify_handle_stream,
+ verifier);
+ event_base_set(nsd->event_base, &verifier->error_stream.event);
+ if(event_add(&verifier->error_stream.event, NULL) != 0) {
+ log_msg(LOG_ERR, "verify: could not add error event for "
+ "zone %s", zone->opts->name);
+ goto fail_stderr;
+ }
+
+ verifier->output_stream.fd = fdout;
+ verifier->output_stream.cnt = 0;
+ verifier->output_stream.off = 0;
+ verifier->output_stream.buf[0] = '\0';
+ event_set(&verifier->output_stream.event,
+ verifier->output_stream.fd,
+ EV_READ|EV_PERSIST,
+ verify_handle_stream,
+ verifier);
+ event_base_set(nsd->event_base, &verifier->output_stream.event);
+ if(event_add(&verifier->output_stream.event, NULL) != 0) {
+ log_msg(LOG_ERR, "verify: could not add output event for "
+ "zone %s", zone->opts->name);
+ goto fail_stdout;
+ }
+
+ if(fin != NULL) {
+ verifier->zone_feed.fh = fin;
+
+ zone_rr_iter_init(&verifier->zone_feed.rriter, zone);
+
+ verifier->zone_feed.rrprinter
+ = create_pretty_rr(nsd->server_region);
+ verifier->zone_feed.region
+ = region_create(xalloc, free);
+ verifier->zone_feed.buffer
+ = buffer_create(nsd->server_region, MAX_RDLENGTH);
+
+ event_set(&verifier->zone_feed.event,
+ fileno(verifier->zone_feed.fh),
+ EV_WRITE|EV_PERSIST,
+ &verify_handle_feed,
+ verifier);
+ event_base_set(nsd->event_base, &verifier->zone_feed.event);
+ if(event_add(&verifier->zone_feed.event, NULL) != 0) {
+ log_msg(LOG_ERR, "verify: could not add input event "
+ "for zone %s", zone->opts->name);
+ goto fail_stdin;
+ }
+ }
+
+ if(timeout > 0) {
+ verifier->timeout.tv_sec = timeout;
+ verifier->timeout.tv_usec = 0;
+ event_set(&verifier->timeout_event,
+ -1,
+ EV_TIMEOUT,
+ verify_handle_timeout,
+ verifier);
+ event_base_set(nsd->event_base, &verifier->timeout_event);
+ if(event_add(&verifier->timeout_event, &verifier->timeout) != 0) {
+ log_msg(LOG_ERR, "verify: could not add timeout event "
+ "for zone %s", zone->opts->name);
+ goto fail_timeout;
+ }
+
+ log_msg(LOG_INFO, "verify: started verifier for zone %s "
+ "(pid %d), timeout is %d seconds",
+ zone->opts->name, verifier->pid, timeout);
+ } else {
+ log_msg(LOG_INFO, "verify: started verifier for zone %s "
+ "(pid %d)", zone->opts->name, verifier->pid);
+ }
+
+ zone->is_ok = 1;
+ nsd->verifier_count++;
+ return;
+
+fail_timeout:
+ verifier->timeout.tv_sec = 0;
+ verifier->timeout.tv_usec = 0;
+ if(fin != NULL) {
+ event_del(&verifier->zone_feed.event);
+ }
+fail_stdin:
+ verifier->zone_feed.fh = NULL;
+ event_del(&verifier->output_stream.event);
+fail_stdout:
+ verifier->output_stream.fd = -1;
+ event_del(&verifier->error_stream.event);
+fail_stderr:
+ verifier->error_stream.fd = -1;
+fail_fcntl:
+ kill_verifier(verifier);
+ if(fin != NULL) {
+ fclose(fin);
+ } else if (fdin >= 0) {
+ close(fdin);
+ }
+ close(fdout);
+ close(fderr);
+fail_popen3:
+ zone->is_bad = 1;
+ verifier->pid = -1;
+ verifier->zone = NULL;
+}
--- /dev/null
+/*
+ * verify.h
+ *
+ * Copyright (c) 2020, NLnet Labs. All rights reserved.
+ *
+ * See LICENSE for the license.
+ */
+#ifndef _VERIFY_H_
+#define _VERIFY_H_
+
+#ifndef USE_MINI_EVENT
+# ifdef HAVE_EVENT_H
+# include <event.h>
+# else
+# include <event2/event.h>
+# include "event2/event_struct.h"
+# include "event2/event_compat.h"
+# endif
+#else
+# include "mini_event.h"
+#endif
+
+/*
+ * Track position in zone to feed verifier more data as the input descriptor
+ * becomes available.
+ */
+struct verifier_zone_feed {
+ FILE *fh;
+ struct event event;
+ zone_rr_iter_type rriter;
+ struct state_pretty_rr *rrprinter;
+ struct region *region;
+ struct buffer *buffer;
+};
+
+/* 40 is (estimated) space already used on each logline.
+ * (time, pid, priority, etc)
+ */
+#define LOGLINELEN (MAXSYSLOGMSGLEN-40)
+
+#define LOGBUFSIZE (LOGLINELEN * 2)
+
+/*
+ * STDOUT and STDERR are logged per line. Lines that exceed LOGLINELEN are
+ * split over multiple entries. Splits are indicated with ".." in the log
+ * before and after the break.
+ */
+struct verifier_stream {
+ int fd;
+ struct event event;
+ int priority;
+ int cut;
+ char buf[LOGBUFSIZE+1];
+ size_t cnt;
+ size_t off;
+};
+
+struct verifier {
+ struct nsd *nsd;
+ struct zone *zone;
+ pid_t pid;
+ int was_ok;
+ struct timeval timeout;
+ struct event timeout_event;
+ struct verifier_zone_feed zone_feed;
+ struct verifier_stream output_stream;
+ struct verifier_stream error_stream;
+};
+
+struct zone *verify_next_zone(struct nsd *nsd, struct zone *zone);
+
+void verify_zone(struct nsd *nsd, struct zone *zone);
+
+void verify_handle_signal(int sig, short event, void *arg);
+
+void verify_handle_exit(int fd, short event, void *arg);
+
+void verify_handle_command(int fd, short event, void *arg);
+
+#endif /* _VERIFY_H_ */
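
To make the verifier contract concrete, a minimal verifier sketch follows (illustrative only, not part of the patch). It relies only on the interface visible in this patch: the zone name in the VERIFY_ZONE environment variable, the zone text on standard input when VERIFY_ZONE_ON_STDIN is "yes", stdout/stderr captured into the NSD log, and a non-zero exit status marking the zone bad so the update is rejected:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	const char *zone = getenv("VERIFY_ZONE");
	const char *on_stdin = getenv("VERIFY_ZONE_ON_STDIN");
	char line[65536];
	unsigned long soa_count = 0;

	if(zone == NULL) {
		fprintf(stderr, "VERIFY_ZONE not set\n");
		return 1; /* non-zero exit marks the zone bad */
	}
	if(on_stdin == NULL || strcmp(on_stdin, "yes") != 0) {
		fprintf(stderr, "%s: zone not offered on standard input\n", zone);
		return 1;
	}
	/* naive textual check, purely for illustration: count SOA records */
	while(fgets(line, sizeof(line), stdin) != NULL) {
		if(strstr(line, "\tSOA\t") != NULL || strstr(line, " SOA ") != NULL)
			soa_count++;
	}
	printf("%s: %lu SOA record(s)\n", zone, soa_count); /* ends up in the NSD log */
	return soa_count == 1 ? 0 : 1;
}
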
xfrd_setup_packet(tcp->packet, TYPE_AXFR, CLASS_IN, zone->apex,
zone->query_id);
- zone->query_type = TYPE_AXFR;
+ xfrd_prepare_zone_xfr(zone, TYPE_AXFR);
} else {
DEBUG(DEBUG_XFRD,1, (LOG_INFO, "request incremental zone "
"transfer (IXFR) for %s to %s",
xfrd_setup_packet(tcp->packet, TYPE_IXFR, CLASS_IN, zone->apex,
zone->query_id);
- zone->query_type = TYPE_IXFR;
+ xfrd_prepare_zone_xfr(zone, TYPE_IXFR);
NSCOUNT_SET(tcp->packet, 1);
xfrd_write_soa_buffer(tcp->packet, zone->apex, &zone->soa_disk);
}
- /* old transfer needs to be removed still? */
- if(zone->msg_seq_nr)
- xfrd_unlink_xfrfile(xfrd->nsd, zone->xfrfilenumber);
- zone->msg_seq_nr = 0;
- zone->msg_rr_count = 0;
if(zone->master->key_options && zone->master->key_options->tsig_key) {
- xfrd_tsig_sign_request(tcp->packet, &zone->tsig, zone->master);
+ xfrd_tsig_sign_request(
+ tcp->packet, &zone->latest_xfr->tsig, zone->master);
}
buffer_flip(tcp->packet);
DEBUG(DEBUG_XFRD,1, (LOG_INFO, "sent tcp query with ID %d", zone->query_id));
#endif
ret = conn_read(tcp);
if(ret == -1) {
- log_msg(LOG_ERR, "xfrd: failed writing tcp %s", strerror(errno));
+ log_msg(LOG_ERR, "xfrd: failed reading tcp %s", strerror(errno));
xfrd_tcp_pipe_stop(tp);
return;
}
/* set the write timer to activate */
static void xfrd_write_timer_set(void);
+static void xfrd_free_zone_xfr(xfrd_zone_type* zone, xfrd_xfr_type* xfr);
+
static void
xfrd_signal_callback(int sig, short event, void* ATTR_UNUSED(arg))
{
xfrd->reload_added = 0;
xfrd->reload_timeout.tv_sec = 0;
xfrd->reload_cmd_last_sent = xfrd->xfrd_start_time;
+ xfrd->reload_cmd_first_sent = 0;
+ xfrd->reload_failed = 0;
xfrd->can_send_reload = !reload_active;
xfrd->reload_pid = nsd_pid;
xfrd->child_timer_added = 0;
/* unlink xfr files for running transfers */
RBTREE_FOR(zone, xfrd_zone_type*, xfrd->zones)
{
- if(zone->msg_seq_nr)
- xfrd_unlink_xfrfile(xfrd->nsd, zone->xfrfilenumber);
+ xfrd_xfr_type *xfr;
+ for(xfr = zone->latest_xfr; xfr != NULL; xfr = xfr->prev) {
+ if (xfr->acquired == 0)
+ continue;
+ xfrd_unlink_xfrfile(xfrd->nsd, xfr->xfrfilenumber);
+ }
}
/* unlink xfr files in not-yet-done task file */
xfrd_clean_pending_tasks(xfrd->nsd, xfrd->nsd->task[xfrd->nsd->mytask]);
if(xfrd->zones) {
xfrd_zone_type* z;
RBTREE_FOR(z, xfrd_zone_type*, xfrd->zones) {
- tsig_delete_record(&z->tsig, NULL);
+ while(z->latest_xfr != NULL) {
+ xfrd_free_zone_xfr(z, z->latest_xfr);
+ }
}
}
if(xfrd->notify_zones) {
xzone->soa_nsd_acquired = 0;
xzone->soa_disk_acquired = 0;
+ xzone->latest_xfr = NULL;
xzone->soa_notified_acquired = 0;
/* [0]=1, [1]=0; "." domain name */
xzone->soa_nsd.prim_ns[0] = 1;
xzone->multi_master_first_master = -1;
xzone->multi_master_update_check = -1;
- tsig_create_record_custom(&xzone->tsig, NULL, 0, 0, 4);
/* set refreshing anyway, if we have data it may be old */
xfrd_set_refresh_now(xzone);
xfrd_soa_type soa;
xfrd_soa_type* soa_ptr = &soa;
xfrd_zone_type* zone;
+ xfrd_xfr_type* xfr;
+ xfrd_xfr_type* prev_xfr;
+ enum soainfo_hint hint;
+ time_t before, acquired = 0;
DEBUG(DEBUG_IPC,1, (LOG_INFO, "xfrd: process SOAINFO %s",
dname_to_string(task->zname, 0)));
zone = (xfrd_zone_type*)rbtree_search(xfrd->zones, task->zname);
+ hint = (enum soainfo_hint)task->yesno;
if(task->size <= sizeof(struct task_list_d)+dname_total_size(
task->zname)+sizeof(uint32_t)*6 + sizeof(uint8_t)*2) {
- /* NSD has zone without any info */
- DEBUG(DEBUG_IPC,1, (LOG_INFO, "SOAINFO for %s lost zone",
- dname_to_string(task->zname,0)));
+ DEBUG(DEBUG_IPC,1, (LOG_INFO, "SOAINFO for %s %s zone",
+ dname_to_string(task->zname,0),
+ hint == soainfo_bad ? "kept" : "lost"));
soa_ptr = NULL;
+ /* discard all updates */
+ before = xfrd_time();
} else {
uint8_t* p = (uint8_t*)task->zname + dname_total_size(
task->zname);
DEBUG(DEBUG_IPC,1, (LOG_INFO, "SOAINFO for %s %u",
dname_to_string(task->zname,0),
(unsigned)ntohl(soa.serial)));
+ /* discard all updates received before initial reload unless
+ reload was successful */
+ before = xfrd->reload_cmd_first_sent;
}
if(!zone) {
task->zname, soa_ptr);
return;
}
- xfrd_handle_incoming_soa(zone, soa_ptr, xfrd_time());
+
+ /* soainfo_gone and soainfo_bad are straightforward: delete all updates
+ that were transferred, i.e. acquired != 0. soainfo_ok is more
+ complicated, as there may be subsequent corrupt or inconsistent
+ updates */
+ for(xfr = zone->latest_xfr; xfr; xfr = prev_xfr) {
+ prev_xfr = xfr->prev;
+ /* skip incomplete updates */
+ if(!xfr->acquired) {
+ continue;
+ }
+ if(hint == soainfo_ok) {
+ /* skip non-queued updates */
+ if(!xfr->sent)
+ continue;
+ assert(xfr->acquired <= before);
+ /* skip non-applied updates */
+ if(!soa_ptr ||
+ soa_ptr->serial != htonl(xfr->msg_new_serial))
+ continue;
+ /* updates are applied in-order, acquired time of
+ most-recent update is used as baseline */
+ if(!acquired) {
+ acquired = xfr->acquired;
+ }
+ if(xfrd->reload_failed) {
+ DEBUG(DEBUG_IPC, 1,
+ (LOG_INFO, "xfrd: zone %s mark update "
+ "to serial %u verified",
+ zone->apex_str,
+ xfr->msg_new_serial));
+ diff_update_commit(
+ zone->apex_str, DIFF_VERIFIED,
+ xfrd->nsd, xfr->xfrfilenumber);
+ return;
+ }
+ }
+ DEBUG(DEBUG_IPC, 1,
+ (LOG_INFO, "xfrd: zone %s delete update to serial %u",
+ zone->apex_str,
+ xfr->msg_new_serial));
+ xfrd_delete_zone_xfr(zone, xfr);
+ }
+
+ /* update zone state */
+ switch(hint) {
+ case soainfo_bad:
+ /* "rollback" on-disk soa information */
+ zone->soa_disk_acquired = zone->soa_nsd_acquired;
+ zone->soa_disk = zone->soa_nsd;
+
+ if(xfrd_time() - zone->soa_disk_acquired
+ >= (time_t)ntohl(zone->soa_disk.expire))
+ {
+ /* zone expired */
+ xfrd_set_zone_state(zone, xfrd_zone_expired);
+ }
+ /* do not refresh right away (as is done for corrupt or
+ inconsistent updates), because the zone is likely not fixed
+ on the primary yet; an immediate refresh could trigger an
+ update loop */
+ xfrd_set_timer_retry(zone);
+
+ if(zone->soa_notified_acquired != 0 &&
+ (zone->soa_notified.serial == 0 ||
+ compare_serial(ntohl(zone->soa_disk.serial),
+ ntohl(zone->soa_notified.serial)) >= 0))
+ { /* read was in response to this notification */
+ zone->soa_notified_acquired = 0;
+ }
+ if(zone->soa_notified_acquired && zone->state == xfrd_zone_ok)
+ {
+ /* refresh because of notification */
+ xfrd_set_zone_state(zone, xfrd_zone_refreshing);
+ xfrd_set_refresh_now(zone);
+ }
+ break;
+ case soainfo_ok:
+ if(xfrd->reload_failed)
+ break;
+ /* fall through */
+ case soainfo_gone:
+ xfrd_handle_incoming_soa(zone, soa_ptr, acquired);
+ break;
+ }
}
static void
xfrd_udp_release(z);
} else if(z->event_added)
event_del(&z->zone_handler);
- if(z->msg_seq_nr)
- xfrd_unlink_xfrfile(xfrd->nsd, z->xfrfilenumber);
- /* tsig */
- tsig_delete_record(&z->tsig, NULL);
+ while(z->latest_xfr) xfrd_delete_zone_xfr(z, z->latest_xfr);
/* z->dname is recycled when the zone_options is removed */
region_recycle(xfrd->region, z, sizeof(*z));
if(soa == NULL) {
/* nsd no longer has a zone in memory */
zone->soa_nsd_acquired = 0;
+ zone->soa_disk_acquired = 0;
xfrd_set_zone_state(zone, xfrd_zone_refreshing);
xfrd_set_refresh_now(zone);
return;
if(zone->soa_nsd_acquired && soa->serial == zone->soa_nsd.serial)
return;
- if(zone->soa_disk_acquired && soa->serial == zone->soa_disk.serial)
- {
+ if(zone->soa_disk_acquired) {
+ int cmp = compare_serial(soa->serial, zone->soa_disk.serial);
+
+ /* soa is from an update if serial equals soa_disk.serial or
+ serial is less than soa_disk.serial and the acquired time is
+ before the reload was first requested */
+ if(!((cmp == 0) || (cmp < 0 && acquired != 0))) {
+ goto zonefile;
+ }
+
+ /* the acquired time of an update may not match the time
+ registered in soa_disk_acquired, as a refresh indicating the
+ current serial may have occurred before the reload finished */
+ if(cmp == 0) {
+ acquired = zone->soa_disk_acquired;
+ }
+
/* soa in disk has been loaded in memory */
log_msg(LOG_INFO, "zone %s serial %u is updated to %u",
zone->apex_str, (unsigned)ntohl(zone->soa_nsd.serial),
(unsigned)ntohl(soa->serial));
- zone->soa_nsd = zone->soa_disk;
- zone->soa_nsd_acquired = zone->soa_disk_acquired;
+ zone->soa_nsd = *soa;
+ zone->soa_nsd_acquired = acquired;
xfrd->write_zonefile_needed = 1;
- /* reset exponential backoff, we got a normal timer now */
- zone->fresh_xfr_timeout = 0;
seconds_since_acquired =
xfrd_time() > zone->soa_disk_acquired
? xfrd_time() - zone->soa_disk_acquired : 0;
+
if(seconds_since_acquired < bound_soa_disk_refresh(zone))
{
- /* zone ok, wait for refresh time */
xfrd_set_zone_state(zone, xfrd_zone_ok);
- zone->round_num = -1;
- xfrd_set_timer_refresh(zone);
- } else if(seconds_since_acquired < bound_soa_disk_expire(zone))
- {
- /* zone refreshing */
- xfrd_set_zone_state(zone, xfrd_zone_refreshing);
- xfrd_set_refresh_now(zone);
- }
- if(seconds_since_acquired >= bound_soa_disk_expire(zone)) {
- /* zone expired */
- xfrd_set_zone_state(zone, xfrd_zone_expired);
- xfrd_set_refresh_now(zone);
}
- if(zone->soa_notified_acquired != 0 &&
- (zone->soa_notified.serial == 0 ||
- compare_serial(ntohl(zone->soa_disk.serial),
- ntohl(zone->soa_notified.serial)) >= 0))
- { /* read was in response to this notification */
- zone->soa_notified_acquired = 0;
- }
- if(zone->soa_notified_acquired && zone->state == xfrd_zone_ok)
- {
- /* refresh because of notification */
- xfrd_set_zone_state(zone, xfrd_zone_refreshing);
- xfrd_set_refresh_now(zone);
+ /* update refresh timers based on disk soa, unless there are
+ pending updates, i.e. serial != soa_disk.serial */
+ if (cmp == 0) {
+ /* reset exponential backoff, we got a normal timer now */
+ zone->fresh_xfr_timeout = 0;
+ if(seconds_since_acquired < bound_soa_disk_refresh(zone))
+ {
+ /* zone ok, wait for refresh time */
+ zone->round_num = -1;
+ xfrd_set_timer_refresh(zone);
+ } else if(seconds_since_acquired < bound_soa_disk_expire(zone))
+ {
+ /* zone refreshing */
+ xfrd_set_zone_state(zone, xfrd_zone_refreshing);
+ xfrd_set_refresh_now(zone);
+ }
+ if(seconds_since_acquired >= bound_soa_disk_expire(zone))
+ {
+ /* zone expired */
+ xfrd_set_zone_state(zone, xfrd_zone_expired);
+ xfrd_set_refresh_now(zone);
+ }
+
+ if(zone->soa_notified_acquired != 0 &&
+ (zone->soa_notified.serial == 0 ||
+ compare_serial(ntohl(zone->soa_disk.serial),
+ ntohl(zone->soa_notified.serial)) >= 0))
+ { /* read was in response to this notification */
+ zone->soa_notified_acquired = 0;
+ }
+ if(zone->soa_notified_acquired && zone->state == xfrd_zone_ok)
+ {
+ /* refresh because of notification */
+ xfrd_set_zone_state(zone, xfrd_zone_refreshing);
+ xfrd_set_refresh_now(zone);
+ }
}
xfrd_send_notify(xfrd->notify_zones, zone->apex, &zone->soa_nsd);
return;
}
+zonefile:
+ acquired = xfrd_time();
/* user must have manually provided zone data */
DEBUG(DEBUG_XFRD,1, (LOG_INFO,
"xfrd: zone %s serial %u from zonefile. refreshing",
xfrd_setup_packet(xfrd->packet, TYPE_IXFR, CLASS_IN, zone->apex,
qid_generate());
zone->query_id = ID(xfrd->packet);
- zone->query_type = TYPE_IXFR;
- /* delete old xfr file? */
- if(zone->msg_seq_nr)
- xfrd_unlink_xfrfile(xfrd->nsd, zone->xfrfilenumber);
- zone->msg_seq_nr = 0;
- zone->msg_rr_count = 0;
+ xfrd_prepare_zone_xfr(zone, TYPE_IXFR);
DEBUG(DEBUG_XFRD,1, (LOG_INFO, "sent query with ID %d", zone->query_id));
NSCOUNT_SET(xfrd->packet, 1);
xfrd_write_soa_buffer(xfrd->packet, zone->apex, &zone->soa_disk);
/* if we have tsig keys, sign the ixfr query */
if(zone->master->key_options && zone->master->key_options->tsig_key) {
- xfrd_tsig_sign_request(xfrd->packet, &zone->tsig, zone->master);
+ xfrd_tsig_sign_request(
+ xfrd->packet, &zone->latest_xfr->tsig, zone->master);
}
buffer_flip(xfrd->packet);
xfrd_set_timer(zone, XFRD_UDP_TIMEOUT);
domain_table_type* owners;
rdata_atom_type* rdatas;
- for(i=0; i<count; ++i,++zone->msg_rr_count)
+ for(i=0; i<count; ++i,++zone->latest_xfr->msg_rr_count)
{
if (*done) {
DEBUG(DEBUG_XFRD,1, (LOG_ERR, "xfrd: zone %s xfr has "
"unable to parse soainfo", zone->apex_str));
return 0;
}
- if(zone->msg_rr_count == 1 &&
- ntohl(soa->serial) != zone->msg_new_serial) {
+ if(zone->latest_xfr->msg_rr_count == 1 &&
+ ntohl(soa->serial) != zone->latest_xfr->msg_new_serial) {
/* 2nd RR is SOA with lower serial, this is an IXFR */
- zone->msg_is_ixfr = 1;
+ zone->latest_xfr->msg_is_ixfr = 1;
if(!zone->soa_disk_acquired) {
DEBUG(DEBUG_XFRD,1, (LOG_ERR, "xfrd: zone %s xfr "
"got ixfr but need axfr", zone->apex_str));
"bad start serial", zone->apex_str));
return 0; /* bad start serial in IXFR */
}
- zone->msg_old_serial = ntohl(soa->serial);
+ zone->latest_xfr->msg_old_serial = ntohl(soa->serial);
tmp_serial = ntohl(soa->serial);
}
- else if(ntohl(soa->serial) == zone->msg_new_serial) {
+ else if(ntohl(soa->serial) == zone->latest_xfr->msg_new_serial) {
/* saw another SOA of new serial. */
- if(zone->msg_is_ixfr == 1) {
- zone->msg_is_ixfr = 2; /* seen middle SOA in ixfr */
+ if(zone->latest_xfr->msg_is_ixfr == 1) {
+ zone->latest_xfr->msg_is_ixfr = 2; /* seen middle SOA in ixfr */
} else {
/* 2nd SOA for AXFR or 3rd newSOA for IXFR */
*done = 1;
}
}
- else if (zone->msg_is_ixfr) {
+ else if (zone->latest_xfr->msg_is_ixfr) {
/* some additional checks */
- if(ntohl(soa->serial) > zone->msg_new_serial) {
+ if(ntohl(soa->serial) > zone->latest_xfr->msg_new_serial) {
DEBUG(DEBUG_XFRD,1, (LOG_ERR, "xfrd: zone %s xfr "
"bad middle serial", zone->apex_str));
return 0; /* bad middle serial in IXFR */
int have_tsig = 0;
assert(zone && zone->master && zone->master->key_options
&& zone->master->key_options->tsig_key && packet);
- if(!tsig_find_rr(&zone->tsig, packet)) {
+ if(!tsig_find_rr(&zone->latest_xfr->tsig, packet)) {
log_msg(LOG_ERR, "xfrd: zone %s, from %s: malformed tsig RR",
zone->apex_str, zone->master->ip_address_spec);
return 0;
}
- if(zone->tsig.status == TSIG_OK) {
+ if(zone->latest_xfr->tsig.status == TSIG_OK) {
have_tsig = 1;
- if (zone->tsig.error_code != TSIG_ERROR_NOERROR) {
+ if (zone->latest_xfr->tsig.error_code != TSIG_ERROR_NOERROR) {
log_msg(LOG_ERR, "xfrd: zone %s, from %s: tsig error "
"(%s)", zone->apex_str,
zone->master->ip_address_spec,
- tsig_error(zone->tsig.error_code));
+ tsig_error(zone->latest_xfr->tsig.error_code));
}
}
if(have_tsig) {
/* strip the TSIG resource record off... */
- buffer_set_limit(packet, zone->tsig.position);
+ buffer_set_limit(packet, zone->latest_xfr->tsig.position);
ARCOUNT_SET(packet, ARCOUNT(packet) - 1);
}
/* keep running the TSIG hash */
- tsig_update(&zone->tsig, packet, buffer_limit(packet));
+ tsig_update(&zone->latest_xfr->tsig, packet, buffer_limit(packet));
if(have_tsig) {
- if (!tsig_verify(&zone->tsig)) {
+ if (!tsig_verify(&zone->latest_xfr->tsig)) {
log_msg(LOG_ERR, "xfrd: zone %s, from %s: bad tsig signature",
zone->apex_str, zone->master->ip_address_spec);
return 0;
DEBUG(DEBUG_XFRD,1, (LOG_INFO, "xfrd: zone %s, from %s: good tsig signature",
zone->apex_str, zone->master->ip_address_spec));
/* prepare for next tsigs */
- tsig_prepare(&zone->tsig);
+ tsig_prepare(&zone->latest_xfr->tsig);
}
- else if(zone->tsig.updates_since_last_prepare > XFRD_TSIG_MAX_UNSIGNED) {
+ else if(zone->latest_xfr->tsig.updates_since_last_prepare > XFRD_TSIG_MAX_UNSIGNED) {
/* we allow a number of non-tsig signed packets */
log_msg(LOG_INFO, "xfrd: zone %s, from %s: too many consecutive "
"packets without TSIG", zone->apex_str,
return 0;
}
- if(!have_tsig && zone->msg_seq_nr == 0) {
+ if(!have_tsig && zone->latest_xfr->msg_seq_nr == 0) {
log_msg(LOG_ERR, "xfrd: zone %s, from %s: no tsig in first packet of reply",
zone->apex_str, zone->master->ip_address_spec);
return 0;
return xfrd_packet_bad;
}
}
- if(zone->msg_rr_count == 0 && ancount == 0) {
+ if(zone->latest_xfr->msg_rr_count == 0 && ancount == 0) {
if(zone->tcp_conn == -1 && TC(packet)) {
DEBUG(DEBUG_XFRD,1, (LOG_INFO, "xfrd: TC flagged"));
return xfrd_packet_tcp;
ancount_todo = ancount;
tempregion = region_create(xalloc, free);
- if(zone->msg_rr_count == 0) {
+ if(zone->latest_xfr->msg_rr_count == 0) {
const dname_type* soaname = dname_make_from_packet(tempregion,
packet, 1, 1);
if(!soaname) { /* parse failure */
return xfrd_packet_drop;
}
DEBUG(DEBUG_XFRD,1, (LOG_INFO, "IXFR reply has ok serial (have \
-%u, reply %u).", (unsigned)ntohl(zone->soa_disk.serial), (unsigned)ntohl(soa->serial)));
+%u, reply %u).", (unsigned)zone->soa_disk_acquired ? ntohl(zone->soa_disk.serial) : 0, (unsigned)ntohl(soa->serial)));
/* serial is newer than soa_disk */
if(ancount == 1) {
/* single record means it is like a notify */
/* this AXFR/IXFR notifies me that an even newer serial exists */
zone->soa_notified.serial = soa->serial;
}
- zone->msg_new_serial = ntohl(soa->serial);
- zone->msg_rr_count = 1;
- zone->msg_is_ixfr = 0;
+ zone->latest_xfr->msg_new_serial = ntohl(soa->serial);
+ zone->latest_xfr->msg_rr_count = 1;
+ zone->latest_xfr->msg_is_ixfr = 0;
if(zone->soa_disk_acquired)
- zone->msg_old_serial = ntohl(zone->soa_disk.serial);
- else zone->msg_old_serial = 0;
+ zone->latest_xfr->msg_old_serial = ntohl(zone->soa_disk.serial);
+ else zone->latest_xfr->msg_old_serial = 0;
ancount_todo = ancount - 1;
}
if(done == 0)
return xfrd_packet_more;
if(zone->master->key_options) {
- if(zone->tsig.updates_since_last_prepare != 0) {
+ if(zone->latest_xfr->tsig.updates_since_last_prepare != 0) {
log_msg(LOG_INFO, "xfrd: last packet of reply has no "
"TSIG");
return xfrd_packet_bad;
return buf;
}
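+
+/*
+ * Life cycle of a per-transfer record: xfrd_prepare_zone_xfr() allocates it
+ * and links it in as zone->latest_xfr; received packets update its msg_*
+ * fields and are written to the file identified by xfrfilenumber; once the
+ * transfer completes, acquired is set and sent records on which tasklist the
+ * apply_xfr task was queued; xfrd_delete_zone_xfr() removes the record and,
+ * if data was written, the on-disk xfr file as well.
+ */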
+static void
+xfrd_free_zone_xfr(xfrd_zone_type *zone, xfrd_xfr_type *xfr)
+{
+ if(xfr == zone->latest_xfr) {
+ assert(xfr->next == NULL);
+ if((zone->latest_xfr = xfr->prev) != NULL)
+ zone->latest_xfr->next = NULL;
+ } else {
+ if(xfr->next != NULL)
+ xfr->next->prev = xfr->prev;
+ if(xfr->prev != NULL)
+ xfr->prev->next = xfr->next;
+ }
+ tsig_delete_record(&xfr->tsig, xfrd->region);
+ region_recycle(xfrd->region, xfr, sizeof(*xfr));
+}
+
+void
+xfrd_delete_zone_xfr(xfrd_zone_type *zone, xfrd_xfr_type *xfr)
+{
+ if(xfr->acquired != 0 || xfr->msg_seq_nr != 0) {
+ xfrd_unlink_xfrfile(xfrd->nsd, xfr->xfrfilenumber);
+ }
+ xfrd_free_zone_xfr(zone, xfr);
+}
+
+xfrd_xfr_type *
+xfrd_prepare_zone_xfr(xfrd_zone_type *zone, uint16_t query_type)
+{
+ xfrd_xfr_type *xfr;
+
+ /* old transfer needs to be removed still? */
+ if(zone->latest_xfr != NULL && !zone->latest_xfr->acquired) {
+ xfrd_delete_zone_xfr(zone, zone->latest_xfr);
+ }
+
+ xfr = region_alloc_zero(xfrd->region, sizeof(*xfr));
+ if((xfr->prev = zone->latest_xfr) != NULL) {
+ xfr->prev->next = xfr;
+ }
+ tsig_create_record_custom(&xfr->tsig, NULL, 0, 0, 4);
+ zone->latest_xfr = xfr;
+ xfr->query_type = query_type;
+
+ return xfr;
+}
+
enum xfrd_packet_result
xfrd_handle_received_xfr_packet(xfrd_zone_type* zone, buffer_type* packet)
{
default:
{
/* rollback */
- if(zone->msg_seq_nr > 0) {
+ if(zone->latest_xfr->msg_seq_nr > 0) {
/* do not process xfr - if only one part simply ignore it. */
/* delete file with previous parts of commit */
- xfrd_unlink_xfrfile(xfrd->nsd, zone->xfrfilenumber);
+ xfrd_unlink_xfrfile(xfrd->nsd, zone->latest_xfr->xfrfilenumber);
VERBOSITY(1, (LOG_INFO, "xfrd: zone %s "
"reverted transfer %u from %s",
- zone->apex_str, zone->msg_rr_count?
- (int)zone->msg_new_serial:0,
+ zone->apex_str, zone->latest_xfr->msg_rr_count?
+ (int)zone->latest_xfr->msg_new_serial:0,
zone->master->ip_address_spec));
- zone->msg_seq_nr = 0;
+ zone->latest_xfr->msg_seq_nr = 0;
} else if (res == xfrd_packet_bad) {
VERBOSITY(1, (LOG_INFO, "xfrd: zone %s "
"bad transfer %u from %s",
- zone->apex_str, zone->msg_rr_count?
- (int)zone->msg_new_serial:0,
+ zone->apex_str, zone->latest_xfr->msg_rr_count?
+ (int)zone->latest_xfr->msg_new_serial:0,
zone->master->ip_address_spec));
}
if (res == xfrd_packet_notimpl
- && zone->query_type == TYPE_IXFR)
+ && zone->latest_xfr
+ && zone->latest_xfr->query_type == TYPE_IXFR)
return res;
else
return xfrd_packet_bad;
/* dump reply on disk to diff file */
/* if first part, get new filenumber. Numbers can wrap around, 64bit
* is enough so we do not collide with older-transfers-in-progress */
- if(zone->msg_seq_nr == 0)
- zone->xfrfilenumber = xfrd->xfrfilenumber++;
+ if(zone->latest_xfr->msg_seq_nr == 0)
+ zone->latest_xfr->xfrfilenumber = xfrd->xfrfilenumber++;
diff_write_packet(dname_to_string(zone->apex,0),
zone->zone_options->pattern->pname,
- zone->msg_old_serial, zone->msg_new_serial, zone->msg_seq_nr,
+ zone->latest_xfr->msg_old_serial,
+ zone->latest_xfr->msg_new_serial,
+ zone->latest_xfr->msg_seq_nr,
buffer_begin(packet), buffer_limit(packet), xfrd->nsd,
- zone->xfrfilenumber);
+ zone->latest_xfr->xfrfilenumber);
VERBOSITY(3, (LOG_INFO,
"xfrd: zone %s written received XFR packet from %s with serial %u to "
"disk", zone->apex_str, zone->master->ip_address_spec,
- (int)zone->msg_new_serial));
- zone->msg_seq_nr++;
+ (int)zone->latest_xfr->msg_new_serial));
+ zone->latest_xfr->msg_seq_nr++;
- xfrfile_size = xfrd_get_xfrfile_size(xfrd->nsd, zone->xfrfilenumber);
+ xfrfile_size = xfrd_get_xfrfile_size(
+ xfrd->nsd, zone->latest_xfr->xfrfilenumber);
if( zone->zone_options->pattern->size_limit_xfr != 0 &&
xfrfile_size > zone->zone_options->pattern->size_limit_xfr ) {
/* xfrd_unlink_xfrfile(xfrd->nsd, zone->xfrfilenumber);
/* done. we are completely sure of this */
buffer_clear(packet);
buffer_printf(packet, "received update to serial %u at %s from %s",
- (unsigned)zone->msg_new_serial, xfrd_pretty_time(xfrd_time()),
+ (unsigned)zone->latest_xfr->msg_new_serial, xfrd_pretty_time(xfrd_time()),
zone->master->ip_address_spec);
if(zone->master->key_options) {
buffer_printf(packet, " TSIG verified with key %s",
zone->master->key_options->name);
}
buffer_flip(packet);
- diff_write_commit(zone->apex_str, zone->msg_old_serial,
- zone->msg_new_serial, zone->msg_seq_nr, 1,
- (char*)buffer_begin(packet), xfrd->nsd, zone->xfrfilenumber);
+ diff_write_commit(zone->apex_str, zone->latest_xfr->msg_old_serial,
+ zone->latest_xfr->msg_new_serial, zone->latest_xfr->msg_seq_nr, 1,
+ (char*)buffer_begin(packet), xfrd->nsd, zone->latest_xfr->xfrfilenumber);
VERBOSITY(1, (LOG_INFO, "xfrd: zone %s committed \"%s\"",
zone->apex_str, (char*)buffer_begin(packet)));
- /* reset msg seq nr, so if that is nonnull we know xfr file exists */
- zone->msg_seq_nr = 0;
- /* now put apply_xfr task on the tasklist */
- if(!task_new_apply_xfr(xfrd->nsd->task[xfrd->nsd->mytask],
- xfrd->last_task, zone->apex, zone->msg_old_serial,
- zone->msg_new_serial, zone->xfrfilenumber)) {
- /* delete the file and pretend transfer was bad to continue */
- xfrd_unlink_xfrfile(xfrd->nsd, zone->xfrfilenumber);
- xfrd_set_reload_timeout();
- return xfrd_packet_bad;
+ /* now put apply_xfr task on the tasklist if no reload in progress */
+ if(xfrd->can_send_reload &&
+ task_new_apply_xfr(
+ xfrd->nsd->task[xfrd->nsd->mytask],
+ xfrd->last_task,
+ zone->apex,
+ zone->latest_xfr->msg_old_serial,
+ zone->latest_xfr->msg_new_serial,
+ zone->latest_xfr->xfrfilenumber))
+ {
+ zone->latest_xfr->sent = xfrd->nsd->mytask + 1;
}
+ /* reset msg seq nr, so if that is nonzero we know the xfr file exists */
+ zone->latest_xfr->msg_seq_nr = 0;
/* update the disk serial no. */
- zone->soa_disk_acquired = xfrd_time();
+ zone->soa_disk_acquired = zone->latest_xfr->acquired = xfrd_time();
zone->soa_disk = soa;
if(zone->soa_notified_acquired && (
zone->soa_notified.serial == 0 ||
}
void
-xfrd_check_failed_updates()
+xfrd_check_failed_updates(void)
{
/* see if updates have not come through */
xfrd_zone_type* zone;
+ xfrd_xfr_type* xfr;
+ xfrd_xfr_type* prev_xfr;
+ uint8_t sent = (xfrd->nsd->mytask == 0) + 1;
RBTREE_FOR(zone, xfrd_zone_type*, xfrd->zones)
{
- /* zone has a disk soa, and no nsd soa or a different nsd soa */
- if(zone->soa_disk_acquired != 0 &&
+ /* skip zones without updates */
+ if(!zone->latest_xfr)
+ continue;
+ xfr = zone->latest_xfr;
+ while(!xfr->sent && xfr->prev) {
+ xfr = xfr->prev;
+ }
+
+ /* zone has a sent update and no (or a different) nsd soa;
+ the update must be corrupt */
+ if(xfr->sent == sent &&
(zone->soa_nsd_acquired == 0 ||
- zone->soa_disk.serial != zone->soa_nsd.serial))
+ zone->soa_nsd.serial != htonl(xfr->msg_new_serial)))
{
- if(zone->soa_disk_acquired <
- xfrd->reload_cmd_last_sent)
- {
- /* this zone should have been loaded, since its disk
- soa time is before the time of the reload cmd. */
- xfrd_soa_type dumped_soa = zone->soa_disk;
- log_msg(LOG_ERR, "xfrd: zone %s: soa serial %u "
- "update failed, restarting "
- "transfer (notified zone)",
- zone->apex_str, (unsigned)ntohl(zone->soa_disk.serial));
- /* revert the soa; it has not been acquired properly */
- if(zone->soa_disk_acquired == zone->soa_nsd_acquired) {
- /* this was the same as served,
- * perform force_axfr , re-download
- * same serial from master */
- zone->soa_disk_acquired = 0;
- zone->soa_nsd_acquired = 0;
- } else {
- /* revert soa to the one in server */
- zone->soa_disk_acquired = zone->soa_nsd_acquired;
- zone->soa_disk = zone->soa_nsd;
- }
- /* pretend we are notified with disk soa.
- This will cause a refetch of the data, and reload. */
- xfrd_handle_incoming_notify(zone, &dumped_soa);
- xfrd_set_timer_refresh(zone);
- } else if(zone->soa_disk_acquired >= xfrd->reload_cmd_last_sent) {
- /* this zone still has to be loaded,
- make sure reload is set to be sent. */
- if(xfrd->need_to_send_reload == 0 &&
- xfrd->reload_added == 0) {
- log_msg(LOG_ERR, "xfrd: zone %s: needs "
- "to be loaded. reload lost? "
- "try again", zone->apex_str);
- xfrd_set_reload_timeout();
- }
+ xfrd_soa_type soa;
+ log_msg(LOG_ERR, "xfrd: zone %s: soa serial %u update "
+ "failed, restarting transfer "
+ "(notified zone)",
+ zone->apex_str, xfr->msg_new_serial);
+ /* revert the soa; it has not been acquired properly */
+ if(xfr->acquired == zone->soa_nsd_acquired) {
+ /* this was the same as served,
+ * perform force_axfr , re-download
+ * same serial from master */
+ zone->soa_disk_acquired = 0;
+ zone->soa_nsd_acquired = 0;
+ } else {
+ /* revert soa to the one in server */
+ zone->soa_disk_acquired = zone->soa_nsd_acquired;
+ zone->soa_disk = zone->soa_nsd;
+ }
+ /* fabricate soa and trigger notify to refetch and
+ * reload update */
+ memset(&soa, 0, sizeof(soa));
+ soa.serial = htonl(xfr->msg_new_serial);
+ xfrd_handle_incoming_notify(zone, &soa);
+ xfrd_set_timer_refresh(zone);
+ /* delete all pending updates */
+ for(xfr = zone->latest_xfr; xfr; xfr = prev_xfr) {
+ prev_xfr = xfr->prev;
+ /* skip incomplete updates */
+ if(!xfr->acquired)
+ continue;
+ DEBUG(DEBUG_IPC, 1,
+ (LOG_INFO, "xfrd: zone %s delete "
+ "update to serial %u",
+ zone->apex_str,
+ xfr->msg_new_serial));
+ xfrd_delete_zone_xfr(zone, xfr);
}
}
}
}
void
-xfrd_prepare_zones_for_reload()
+xfrd_prepare_zones_for_reload(void)
{
xfrd_zone_type* zone;
+ xfrd_xfr_type* xfr;
+ int reload, send;
+
+ send = 1;
+ reload = 0;
RBTREE_FOR(zone, xfrd_zone_type*, xfrd->zones)
{
- /* zone has a disk soa, and no nsd soa or a different nsd soa */
- if(zone->soa_disk_acquired != 0 &&
- (zone->soa_nsd_acquired == 0 ||
- zone->soa_disk.serial != zone->soa_nsd.serial))
- {
- if(zone->soa_disk_acquired == xfrd_time()) {
- /* antedate by one second.
- * this makes sure that the zone time is before
- * reload, so that check_failed_zones() is
- * certain of the result.
- */
- zone->soa_disk_acquired--;
+ xfr = zone->latest_xfr;
+ while(xfr) {
+ if(!xfr->prev)
+ break;
+ xfr = xfr->prev;
+ assert(xfr->acquired);
+ }
+
+ while(xfr && xfr->acquired) {
+ /* skip updates that arrived after failed reload */
+ if(xfrd->reload_cmd_first_sent && !xfr->sent)
+ break;
+ assert(!xfrd->reload_cmd_first_sent ||
+ xfrd->reload_cmd_first_sent >= xfr->acquired);
+ if(send) {
+ send = task_new_apply_xfr(
+ xfrd->nsd->task[xfrd->nsd->mytask],
+ xfrd->last_task,
+ zone->apex,
+ xfr->msg_old_serial,
+ xfr->msg_new_serial,
+ xfr->xfrfilenumber);
+ if(send && !reload) {
+ reload = 1;
+ xfrd_set_reload_timeout();
+ }
}
+ xfr->sent = send ? 1 + xfrd->nsd->mytask : 0;
+ xfr = xfr->next;
}
}
}
struct notify_zone;
struct udb_ptr;
typedef struct xfrd_state xfrd_state_type;
+typedef struct xfrd_xfr xfrd_xfr_type;
typedef struct xfrd_zone xfrd_zone_type;
typedef struct xfrd_soa xfrd_soa_type;
/*
int reload_added;
/* last reload must have caught all zone updates before this time */
time_t reload_cmd_last_sent;
+ time_t reload_cmd_first_sent;
+ uint8_t reload_failed;
uint8_t can_send_reload;
pid_t reload_pid;
/* timeout for lost sigchild and reaping children */
/* xfr message handling data */
/* query id */
uint16_t query_id;
+ xfrd_xfr_type *latest_xfr;
+
+ int multi_master_first_master; /* >0: first check master_num */
+ int multi_master_update_check; /* -1: not update >0: last update master_num */
+} ATTR_PACKED;
+
+/*
+ * State for a single zone XFR
+ */
+struct xfrd_xfr {
+ xfrd_xfr_type *next;
+ xfrd_xfr_type *prev;
uint16_t query_type;
+ uint8_t sent; /* written to tasklist: 0: no, 1: task[0], 2: task[1] */
+ time_t acquired; /* time xfr was acquired */
uint32_t msg_seq_nr; /* number of messages already handled */
uint32_t msg_old_serial, msg_new_serial; /* host byte order */
size_t msg_rr_count;
uint8_t msg_is_ixfr; /* 1:IXFR detected. 2:middle IXFR SOA seen. */
tsig_record_type tsig; /* tsig state for IXFR/AXFR */
uint64_t xfrfilenumber; /* identifier for file to store xfr into,
- valid if msg_seq_nr nonzero */
- int multi_master_first_master; /* >0: first check master_num */
- int multi_master_update_check; /* -1: not update >0: last update master_num */
-} ATTR_PACKED;
+ valid if msg_seq_nr or acquired is nonzero */
+};
enum xfrd_packet_result {
xfrd_packet_bad, /* drop the packet/connection */
return within_expire_bounds(zone, ntohl(zone->soa_disk.expire));
}
+/* return the zone's expire period (from the SOA in use by the running server) */
+static inline time_t
+bound_soa_nsd_expire(xfrd_zone_type* zone)
+{
+ return within_expire_bounds(zone, ntohl(zone->soa_nsd.expire));
+}
+
extern xfrd_state_type* xfrd;
/* start xfrd, new start. Pass socket to server_main. */
finished, and all zone SOAs have been sent. */
void xfrd_check_failed_updates(void);
+void
+xfrd_prepare_updates_for_reload(void);
+
/*
* Prepare zones for a reload, this sets the times on the zones to be
* before the current time, so the reload happens after.
const char* xfrd_pretty_time(time_t v);
+xfrd_xfr_type *xfrd_prepare_zone_xfr(xfrd_zone_type *zone, uint16_t query_type);
+
+void xfrd_delete_zone_xfr(xfrd_zone_type *zone, xfrd_xfr_type *xfr);
+
#endif /* XFRD_H */