couple of post-1.5.9 patches).
tests from Mark Patruck, danj, matthieu, millert. reads ok to jung, ok florian.
util/netevent.c util/net_help.c util/random.c util/rbtree.c util/regional.c \
util/rtt.c util/storage/dnstree.c util/storage/lookup3.c \
util/storage/lruhash.c util/storage/slabhash.c util/timehist.c util/tube.c \
-util/winsock_event.c validator/autotrust.c validator/val_anchor.c \
-validator/validator.c validator/val_kcache.c validator/val_kentry.c \
-validator/val_neg.c validator/val_nsec3.c validator/val_nsec.c \
-validator/val_secalgo.c validator/val_sigcrypt.c \
-validator/val_utils.c dns64/dns64.c $(CHECKLOCK_SRC) $(DNSTAP_SRC)
+util/ub_event.c util/ub_event_pluggable.c util/winsock_event.c \
+validator/autotrust.c validator/val_anchor.c validator/validator.c \
+validator/val_kcache.c validator/val_kentry.c validator/val_neg.c \
+validator/val_nsec3.c validator/val_nsec.c validator/val_secalgo.c \
+validator/val_sigcrypt.c validator/val_utils.c dns64/dns64.c cachedb/cachedb.c $(CHECKLOCK_SRC) \
+$(DNSTAP_SRC)
COMMON_OBJ_WITHOUT_NETCALL=dns.lo infra.lo rrset.lo dname.lo msgencode.lo \
as112.lo msgparse.lo msgreply.lo packed_rrset.lo iterator.lo iter_delegpt.lo \
iter_donotq.lo iter_fwd.lo iter_hints.lo iter_priv.lo iter_resptype.lo \
random.lo rbtree.lo regional.lo rtt.lo dnstree.lo lookup3.lo lruhash.lo \
slabhash.lo timehist.lo tube.lo winsock_event.lo autotrust.lo val_anchor.lo \
validator.lo val_kcache.lo val_kentry.lo val_neg.lo val_nsec3.lo val_nsec.lo \
-val_secalgo.lo val_sigcrypt.lo val_utils.lo dns64.lo \
+val_secalgo.lo val_sigcrypt.lo val_utils.lo dns64.lo cachedb.lo \
$(PYTHONMOD_OBJ) $(CHECKLOCK_OBJ) $(DNSTAP_OBJ)
-COMMON_OBJ=$(COMMON_OBJ_WITHOUT_NETCALL) netevent.lo listen_dnsport.lo \
+COMMON_OBJ_WITHOUT_UB_EVENT=$(COMMON_OBJ_WITHOUT_NETCALL) netevent.lo listen_dnsport.lo \
outside_network.lo
+COMMON_OBJ=$(COMMON_OBJ_WITHOUT_UB_EVENT) ub_event.lo
# set to $COMMON_OBJ or to "" if --enableallsymbols
COMMON_OBJ_ALL_SYMBOLS=@COMMON_OBJ_ALL_SYMBOLS@
COMPAT_SRC=compat/ctime_r.c compat/fake-rfc2553.c compat/gmtime_r.c \
compat/strlcpy.c compat/strptime.c compat/getentropy_linux.c \
compat/getentropy_osx.c compat/getentropy_solaris.c compat/getentropy_win.c \
compat/explicit_bzero.c compat/arc4random.c compat/arc4random_uniform.c \
-compat/arc4_lock.c compat/sha512.c compat/reallocarray.c compat/isblank.c
+compat/arc4_lock.c compat/sha512.c compat/reallocarray.c compat/isblank.c \
+compat/strsep.c
COMPAT_OBJ=$(LIBOBJS:.o=.lo)
COMPAT_OBJ_WITHOUT_CTIME=$(LIBOBJ_WITHOUT_CTIME:.o=.lo)
COMPAT_OBJ_WITHOUT_CTIMEARC4=$(LIBOBJ_WITHOUT_CTIMEARC4:.o=.lo)
testcode/replay.c testcode/fake_event.c
TESTBOUND_OBJ=testbound.lo replay.lo fake_event.lo
TESTBOUND_OBJ_LINK=$(TESTBOUND_OBJ) testpkts.lo worker.lo acl_list.lo \
-daemon.lo stats.lo $(COMMON_OBJ_WITHOUT_NETCALL) $(SLDNS_OBJ) $(COMPAT_OBJ)
+daemon.lo stats.lo $(COMMON_OBJ_WITHOUT_NETCALL) ub_event.lo $(SLDNS_OBJ) \
+$(COMPAT_OBJ)
LOCKVERIFY_SRC=testcode/lock_verify.c
LOCKVERIFY_OBJ=lock_verify.lo
LOCKVERIFY_OBJ_LINK=$(LOCKVERIFY_OBJ) worker_cb.lo $(COMMON_OBJ) $(COMPAT_OBJ) \
$(SLDNS_OBJ)
LIBUNBOUND_SRC=libunbound/context.c libunbound/libunbound.c \
libunbound/libworker.c
-LIBUNBOUND_OBJ=context.lo libunbound.lo libworker.lo
-LIBUNBOUND_OBJ_LINK=$(LIBUNBOUND_OBJ) $(COMMON_OBJ) $(SLDNS_OBJ) $(COMPAT_OBJ)
+LIBUNBOUND_OBJ=context.lo libunbound.lo libworker.lo ub_event_pluggable.lo
+LIBUNBOUND_OBJ_LINK=$(LIBUNBOUND_OBJ) $(COMMON_OBJ_WITHOUT_UB_EVENT) $(SLDNS_OBJ) $(COMPAT_OBJ)
# win apps or "" if not on windows
WINAPPS=@WINAPPS@
@echo "You still need to remove "`dirname $(DESTDIR)$(configfile)`" , $(DESTDIR)$(configfile) by hand"
iana_update:
- curl -o port-numbers.tmp http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml --compressed
+ curl -o port-numbers.tmp https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml --compressed
if file port-numbers.tmp | grep 'gzip' >/dev/null; then zcat port-numbers.tmp; else cat port-numbers.tmp; fi | awk '/<record>/ {p=0;} /<protocol>udp/ {p=1;} /<protocol>[^u]/ {p=0;} /Decomissioned|Decommissioned|Removed|De-registered|unassigned|Unassigned|Reserved/ {u=1;} /<number>/ { if(u==1) {u=0;} else { if(p==1) { match($$0,/[0-9]+/); print substr($$0, RSTART, RLENGTH) ","}}}' | sort -nu > util/iana_ports.inc
rm -f port-numbers.tmp
rm -f $(DEPEND_TMP) $(DEPEND_TMP2)
# Dependencies
-as112.lo as112.o: $(srcdir)/util/as112.c $(srcdir)/util/as112.h
dns.lo dns.o: $(srcdir)/services/cache/dns.c config.h $(srcdir)/iterator/iter_delegpt.h $(srcdir)/util/log.h \
$(srcdir)/validator/val_nsec.h $(srcdir)/util/data/packed_rrset.h $(srcdir)/util/storage/lruhash.h \
$(srcdir)/util/locks.h $(srcdir)/services/cache/dns.h $(srcdir)/util/data/msgreply.h \
$(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/util/storage/slabhash.h \
$(srcdir)/util/data/packed_rrset.h $(srcdir)/sldns/rrdef.h $(srcdir)/util/config_file.h \
$(srcdir)/util/data/msgreply.h $(srcdir)/util/regional.h $(srcdir)/util/alloc.h
+as112.lo as112.o: $(srcdir)/util/as112.c $(srcdir)/util/as112.h
dname.lo dname.o: $(srcdir)/util/data/dname.c config.h $(srcdir)/util/data/dname.h \
$(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/util/data/msgparse.h \
$(srcdir)/sldns/pkthdr.h $(srcdir)/sldns/rrdef.h $(srcdir)/util/storage/lookup3.h $(srcdir)/sldns/sbuffer.h
$(srcdir)/sldns/sbuffer.h
msgparse.lo msgparse.o: $(srcdir)/util/data/msgparse.c config.h $(srcdir)/util/data/msgparse.h \
$(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/sldns/pkthdr.h \
- $(srcdir)/sldns/rrdef.h $(srcdir)/util/data/dname.h $(srcdir)/util/data/packed_rrset.h \
- $(srcdir)/util/storage/lookup3.h $(srcdir)/util/regional.h $(srcdir)/sldns/sbuffer.h $(srcdir)/sldns/parseutil.h \
- $(srcdir)/sldns/wire2str.h
+ $(srcdir)/sldns/rrdef.h $(srcdir)/util/data/msgreply.h $(srcdir)/util/data/packed_rrset.h \
+ $(srcdir)/util/data/dname.h $(srcdir)/util/storage/lookup3.h $(srcdir)/util/regional.h $(srcdir)/sldns/sbuffer.h \
+ $(srcdir)/sldns/parseutil.h $(srcdir)/sldns/wire2str.h
msgreply.lo msgreply.o: $(srcdir)/util/data/msgreply.c config.h $(srcdir)/util/data/msgreply.h \
$(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/util/data/packed_rrset.h \
$(srcdir)/util/storage/lookup3.h $(srcdir)/util/alloc.h $(srcdir)/util/netevent.h $(srcdir)/util/net_help.h \
$(srcdir)/sldns/pkthdr.h $(srcdir)/sldns/rrdef.h $(srcdir)/util/fptr_wlist.h $(srcdir)/util/netevent.h \
$(srcdir)/util/tube.h $(srcdir)/services/mesh.h $(srcdir)/util/rbtree.h $(srcdir)/dns64/dns64.h \
$(srcdir)/iterator/iterator.h $(srcdir)/services/outbound_list.h $(srcdir)/validator/validator.h \
- $(srcdir)/validator/val_utils.h
+ $(srcdir)/validator/val_utils.h $(PYTHONMOD_HEADER) $(srcdir)/cachedb/cachedb.h
outbound_list.lo outbound_list.o: $(srcdir)/services/outbound_list.c config.h \
$(srcdir)/services/outbound_list.h $(srcdir)/services/outside_network.h $(srcdir)/util/rbtree.h \
$(srcdir)/util/netevent.h
$(srcdir)/validator/val_nsec3.h $(srcdir)/validator/val_sigcrypt.h $(srcdir)/validator/val_kentry.h \
$(srcdir)/validator/val_neg.h $(srcdir)/validator/autotrust.h $(srcdir)/libunbound/libworker.h \
$(srcdir)/libunbound/context.h $(srcdir)/util/alloc.h $(srcdir)/libunbound/unbound.h \
- $(srcdir)/libunbound/worker.h $(srcdir)/sldns/sbuffer.h $(srcdir)/util/config_file.h
+ $(srcdir)/libunbound/worker.h $(srcdir)/sldns/sbuffer.h $(srcdir)/util/config_file.h \
+ $(PYTHONMOD_HEADER) $(srcdir)/cachedb/cachedb.h
locks.lo locks.o: $(srcdir)/util/locks.c config.h $(srcdir)/util/locks.h $(srcdir)/util/log.h
log.lo log.o: $(srcdir)/util/log.c config.h $(srcdir)/util/log.h $(srcdir)/util/locks.h $(srcdir)/sldns/sbuffer.h
mini_event.lo mini_event.o: $(srcdir)/util/mini_event.c config.h $(srcdir)/util/mini_event.h $(srcdir)/util/rbtree.h \
module.lo module.o: $(srcdir)/util/module.c config.h $(srcdir)/util/module.h $(srcdir)/util/storage/lruhash.h \
$(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/util/data/msgreply.h $(srcdir)/util/data/packed_rrset.h \
$(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h $(srcdir)/sldns/rrdef.h
-netevent.lo netevent.o: $(srcdir)/util/netevent.c config.h $(srcdir)/util/netevent.h $(srcdir)/util/log.h \
- $(srcdir)/util/net_help.h $(srcdir)/util/fptr_wlist.h $(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h \
- $(srcdir)/util/module.h $(srcdir)/util/data/msgreply.h $(srcdir)/util/data/packed_rrset.h \
+netevent.lo netevent.o: $(srcdir)/util/netevent.c config.h $(srcdir)/util/netevent.h $(srcdir)/util/ub_event.h \
+ $(srcdir)/util/log.h $(srcdir)/util/net_help.h $(srcdir)/util/fptr_wlist.h $(srcdir)/util/storage/lruhash.h \
+ $(srcdir)/util/locks.h $(srcdir)/util/module.h $(srcdir)/util/data/msgreply.h $(srcdir)/util/data/packed_rrset.h \
$(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h $(srcdir)/sldns/rrdef.h $(srcdir)/util/tube.h \
$(srcdir)/services/mesh.h $(srcdir)/util/rbtree.h $(srcdir)/services/modstack.h $(srcdir)/sldns/sbuffer.h \
- $(srcdir)/dnstap/dnstap.h $(srcdir)/util/mini_event.h $(srcdir)/util/rbtree.h
+ $(srcdir)/dnstap/dnstap.h
net_help.lo net_help.o: $(srcdir)/util/net_help.c config.h $(srcdir)/util/net_help.h $(srcdir)/util/log.h \
$(srcdir)/util/data/dname.h $(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/module.h \
$(srcdir)/util/data/msgreply.h $(srcdir)/util/data/packed_rrset.h $(srcdir)/util/data/msgparse.h \
$(srcdir)/util/netevent.h $(srcdir)/util/fptr_wlist.h $(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h \
$(srcdir)/util/module.h $(srcdir)/util/data/msgreply.h $(srcdir)/util/data/packed_rrset.h \
$(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h $(srcdir)/sldns/rrdef.h $(srcdir)/services/mesh.h \
- $(srcdir)/util/rbtree.h $(srcdir)/services/modstack.h
+ $(srcdir)/util/rbtree.h $(srcdir)/services/modstack.h $(srcdir)/util/ub_event.h
+ub_event.lo ub_event.o: $(srcdir)/util/ub_event.c config.h $(srcdir)/util/ub_event.h $(srcdir)/util/log.h \
+ $(srcdir)/util/netevent.h $(srcdir)/util/tube.h $(srcdir)/util/mini_event.h $(srcdir)/util/rbtree.h
+ub_event_pluggable.lo ub_event_pluggable.o: $(srcdir)/util/ub_event_pluggable.c config.h $(srcdir)/util/ub_event.h \
+ $(srcdir)/libunbound/unbound-event.h $(srcdir)/util/netevent.h $(srcdir)/util/log.h $(srcdir)/util/fptr_wlist.h \
+ $(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/module.h $(srcdir)/util/data/msgreply.h \
+ $(srcdir)/util/data/packed_rrset.h $(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h \
+ $(srcdir)/sldns/rrdef.h $(srcdir)/util/tube.h $(srcdir)/services/mesh.h $(srcdir)/util/rbtree.h \
+ $(srcdir)/services/modstack.h $(srcdir)/util/mini_event.h $(srcdir)/util/rbtree.h
winsock_event.lo winsock_event.o: $(srcdir)/util/winsock_event.c config.h
autotrust.lo autotrust.o: $(srcdir)/validator/autotrust.c config.h $(srcdir)/validator/autotrust.h \
$(srcdir)/util/rbtree.h $(srcdir)/util/data/packed_rrset.h $(srcdir)/util/storage/lruhash.h \
val_anchor.lo val_anchor.o: $(srcdir)/validator/val_anchor.c config.h $(srcdir)/validator/val_anchor.h \
$(srcdir)/util/rbtree.h $(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/validator/val_sigcrypt.h \
$(srcdir)/util/data/packed_rrset.h $(srcdir)/util/storage/lruhash.h $(srcdir)/validator/autotrust.h \
- $(srcdir)/util/data/dname.h $(srcdir)/util/net_help.h $(srcdir)/util/config_file.h $(srcdir)/sldns/sbuffer.h \
- $(srcdir)/sldns/rrdef.h $(srcdir)/sldns/str2wire.h $(srcdir)/util/as112.h
+ $(srcdir)/util/data/dname.h $(srcdir)/util/net_help.h $(srcdir)/util/config_file.h $(srcdir)/util/as112.h \
+ $(srcdir)/sldns/sbuffer.h $(srcdir)/sldns/rrdef.h $(srcdir)/sldns/str2wire.h
validator.lo validator.o: $(srcdir)/validator/validator.c config.h $(srcdir)/validator/validator.h \
$(srcdir)/util/module.h $(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h \
$(srcdir)/util/data/msgreply.h $(srcdir)/util/data/packed_rrset.h $(srcdir)/util/data/msgparse.h \
$(srcdir)/validator/val_sigcrypt.h $(srcdir)/validator/val_anchor.h $(srcdir)/util/rbtree.h \
$(srcdir)/validator/val_nsec.h $(srcdir)/validator/val_neg.h $(srcdir)/services/cache/rrset.h \
$(srcdir)/util/storage/slabhash.h $(srcdir)/services/cache/dns.h $(srcdir)/util/data/dname.h \
- $(srcdir)/util/net_help.h $(srcdir)/util/regional.h
+ $(srcdir)/util/net_help.h $(srcdir)/util/regional.h $(srcdir)/sldns/wire2str.h $(srcdir)/sldns/parseutil.h
dns64.lo dns64.o: $(srcdir)/dns64/dns64.c config.h $(srcdir)/dns64/dns64.h $(srcdir)/util/module.h \
$(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/util/data/msgreply.h \
$(srcdir)/util/data/packed_rrset.h $(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h \
$(srcdir)/util/storage/slabhash.h $(srcdir)/util/config_file.h $(srcdir)/util/fptr_wlist.h \
$(srcdir)/util/netevent.h $(srcdir)/util/tube.h $(srcdir)/services/mesh.h $(srcdir)/util/rbtree.h \
$(srcdir)/services/modstack.h $(srcdir)/util/net_help.h $(srcdir)/util/regional.h
+cachedb.lo cachedb.o: $(srcdir)/cachedb/cachedb.c config.h $(srcdir)/cachedb/cachedb.h $(srcdir)/util/module.h \
+ $(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/util/data/msgreply.h \
+ $(srcdir)/util/data/packed_rrset.h $(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h \
+ $(srcdir)/sldns/rrdef.h $(srcdir)/util/regional.h $(srcdir)/util/net_help.h $(srcdir)/util/config_file.h \
+ $(srcdir)/util/data/msgencode.h $(srcdir)/services/cache/dns.h $(srcdir)/validator/val_neg.h \
+ $(srcdir)/util/rbtree.h $(srcdir)/validator/val_secalgo.h $(srcdir)/iterator/iter_utils.h \
+ $(srcdir)/iterator/iter_resptype.h $(srcdir)/sldns/parseutil.h $(srcdir)/sldns/wire2str.h \
+ $(srcdir)/sldns/sbuffer.h
checklocks.lo checklocks.o: $(srcdir)/testcode/checklocks.c config.h $(srcdir)/util/locks.h $(srcdir)/util/log.h \
$(srcdir)/testcode/checklocks.h
unitanchor.lo unitanchor.o: $(srcdir)/testcode/unitanchor.c config.h $(srcdir)/util/log.h $(srcdir)/util/data/dname.h \
$(srcdir)/util/storage/dnstree.h $(srcdir)/util/rbtree.h $(srcdir)/util/rtt.h $(srcdir)/util/fptr_wlist.h \
$(srcdir)/util/module.h $(srcdir)/util/data/msgreply.h $(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h \
$(srcdir)/sldns/rrdef.h $(srcdir)/util/tube.h $(srcdir)/services/mesh.h $(srcdir)/util/net_help.h \
- $(srcdir)/util/mini_event.h $(srcdir)/util/rbtree.h
+ $(srcdir)/util/ub_event.h
worker.lo worker.o: $(srcdir)/daemon/worker.c config.h $(srcdir)/util/log.h $(srcdir)/util/net_help.h \
$(srcdir)/util/random.h $(srcdir)/daemon/worker.h $(srcdir)/libunbound/worker.h $(srcdir)/sldns/sbuffer.h \
$(srcdir)/util/data/packed_rrset.h $(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h \
$(srcdir)/util/data/packed_rrset.h $(srcdir)/services/cache/infra.h $(srcdir)/util/storage/dnstree.h \
$(srcdir)/util/rtt.h $(srcdir)/util/fptr_wlist.h $(srcdir)/util/module.h $(srcdir)/util/data/msgreply.h \
$(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h $(srcdir)/sldns/rrdef.h $(srcdir)/util/tube.h \
- $(srcdir)/services/mesh.h $(srcdir)/util/net_help.h $(srcdir)/util/mini_event.h $(srcdir)/util/rbtree.h
+ $(srcdir)/services/mesh.h $(srcdir)/util/net_help.h $(srcdir)/util/ub_event.h
testpkts.lo testpkts.o: $(srcdir)/testcode/testpkts.c config.h $(srcdir)/testcode/testpkts.h \
$(srcdir)/util/net_help.h $(srcdir)/util/log.h $(srcdir)/sldns/sbuffer.h $(srcdir)/sldns/rrdef.h $(srcdir)/sldns/pkthdr.h \
$(srcdir)/sldns/str2wire.h $(srcdir)/sldns/wire2str.h
$(srcdir)/iterator/iterator.h $(srcdir)/services/outbound_list.h $(srcdir)/iterator/iter_fwd.h \
$(srcdir)/util/rbtree.h $(srcdir)/iterator/iter_hints.h $(srcdir)/util/storage/dnstree.h \
$(srcdir)/validator/validator.h $(srcdir)/validator/val_utils.h $(srcdir)/services/localzone.h \
- $(srcdir)/sldns/sbuffer.h
+ $(srcdir)/sldns/sbuffer.h $(PYTHONMOD_HEADER)
worker_cb.lo worker_cb.o: $(srcdir)/smallapp/worker_cb.c config.h $(srcdir)/libunbound/context.h \
$(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/util/alloc.h $(srcdir)/util/rbtree.h $(srcdir)/services/modstack.h \
$(srcdir)/libunbound/unbound.h $(srcdir)/util/data/packed_rrset.h $(srcdir)/util/storage/lruhash.h \
$(srcdir)/util/data/packed_rrset.h $(srcdir)/util/storage/lruhash.h $(srcdir)/libunbound/libworker.h \
$(srcdir)/util/config_file.h $(srcdir)/util/module.h $(srcdir)/util/data/msgreply.h \
$(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h $(srcdir)/sldns/rrdef.h $(srcdir)/util/regional.h \
- $(srcdir)/util/random.h $(srcdir)/util/net_help.h $(srcdir)/util/tube.h $(srcdir)/services/localzone.h \
- $(srcdir)/services/cache/infra.h $(srcdir)/util/storage/dnstree.h $(srcdir)/util/rtt.h \
- $(srcdir)/services/cache/rrset.h $(srcdir)/util/storage/slabhash.h $(srcdir)/sldns/sbuffer.h
+ $(srcdir)/util/random.h $(srcdir)/util/net_help.h $(srcdir)/util/tube.h $(srcdir)/util/ub_event.h \
+ $(srcdir)/services/localzone.h $(srcdir)/services/cache/infra.h $(srcdir)/util/storage/dnstree.h \
+ $(srcdir)/util/rtt.h $(srcdir)/services/cache/rrset.h $(srcdir)/util/storage/slabhash.h \
+ $(srcdir)/sldns/sbuffer.h
libworker.lo libworker.o: $(srcdir)/libunbound/libworker.c config.h $(srcdir)/libunbound/libworker.h \
$(srcdir)/util/data/packed_rrset.h $(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h \
$(srcdir)/libunbound/context.h $(srcdir)/util/alloc.h $(srcdir)/util/rbtree.h $(srcdir)/services/modstack.h \
$(srcdir)/util/data/packed_rrset.h $(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h \
$(srcdir)/sldns/rrdef.h $(srcdir)/util/netevent.h $(srcdir)/util/net_help.h $(srcdir)/services/cache/dns.h \
$(srcdir)/services/cache/rrset.h $(srcdir)/util/storage/slabhash.h $(srcdir)/util/regional.h \
- $(srcdir)/iterator/iter_delegpt.h $(srcdir)/sldns/sbuffer.h
+	$(srcdir)/iterator/iter_delegpt.h $(srcdir)/sldns/sbuffer.h
+
win_svc.lo win_svc.o: $(srcdir)/winrc/win_svc.c config.h $(srcdir)/winrc/win_svc.h $(srcdir)/winrc/w_inst.h \
$(srcdir)/daemon/daemon.h $(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/util/alloc.h $(srcdir)/services/modstack.h \
$(srcdir)/daemon/worker.h $(srcdir)/libunbound/worker.h \
$(srcdir)/sldns/sbuffer.h $(srcdir)/util/data/packed_rrset.h $(srcdir)/util/storage/lruhash.h \
$(srcdir)/util/netevent.h $(srcdir)/util/data/msgreply.h $(srcdir)/util/data/msgparse.h $(srcdir)/sldns/pkthdr.h \
$(srcdir)/sldns/rrdef.h $(srcdir)/daemon/stats.h $(srcdir)/util/timehist.h $(srcdir)/util/module.h \
- $(srcdir)/dnstap/dnstap.h $(srcdir)/daemon/remote.h $(srcdir)/util/config_file.h $(srcdir)/util/winsock_event.h
+ $(srcdir)/dnstap/dnstap.h $(srcdir)/daemon/remote.h $(srcdir)/util/config_file.h $(srcdir)/util/ub_event.h
w_inst.lo w_inst.o: $(srcdir)/winrc/w_inst.c config.h $(srcdir)/winrc/w_inst.h $(srcdir)/winrc/win_svc.h
unbound-service-install.lo unbound-service-install.o: $(srcdir)/winrc/unbound-service-install.c config.h \
$(srcdir)/winrc/w_inst.h
strlcat.lo strlcat.o: $(srcdir)/compat/strlcat.c config.h
strlcpy.lo strlcpy.o: $(srcdir)/compat/strlcpy.c config.h
strptime.lo strptime.o: $(srcdir)/compat/strptime.c config.h
+strsep.lo strsep.o: $(srcdir)/compat/strsep.c config.h
getentropy_linux.lo getentropy_linux.o: $(srcdir)/compat/getentropy_linux.c config.h
getentropy_osx.lo getentropy_osx.o: $(srcdir)/compat/getentropy_osx.c config.h
getentropy_solaris.lo getentropy_solaris.o: $(srcdir)/compat/getentropy_solaris.c config.h
# Copyright 2009, Wouter Wijngaards, NLnet Labs.
# BSD licensed.
#
-# Version 32
+# Version 34
+# 2016-03-21 Check -ldl -pthread for libcrypto for ldns and openssl 1.1.0.
+# 2016-03-21 Use HMAC_Update instead of HMAC_CTX_init (for openssl-1.1.0).
# 2016-01-04 -D_DEFAULT_SOURCE defined with -D_BSD_SOURCE for Linux glibc 2.20
# 2015-12-11 FLTO check for new OSX, clang.
# 2015-11-18 spelling check fix.
ACX_RUNTIME_PATH_ADD([$ssldir/lib])
fi
- AC_MSG_CHECKING([for HMAC_CTX_init in -lcrypto])
+ AC_MSG_CHECKING([for HMAC_Update in -lcrypto])
LIBS="$LIBS -lcrypto"
LIBSSL_LIBS="$LIBSSL_LIBS -lcrypto"
AC_TRY_LINK(, [
- int HMAC_CTX_init(void);
- (void)HMAC_CTX_init();
+ int HMAC_Update(void);
+ (void)HMAC_Update();
], [
AC_MSG_RESULT(yes)
- AC_DEFINE([HAVE_HMAC_CTX_INIT], 1,
- [If you have HMAC_CTX_init])
+ AC_DEFINE([HAVE_HMAC_UPDATE], 1,
+ [If you have HMAC_Update])
], [
AC_MSG_RESULT(no)
# check if -lwsock32 or -lgdi32 are needed.
LIBSSL_LIBS="$LIBSSL_LIBS -lgdi32"
AC_MSG_CHECKING([if -lcrypto needs -lgdi32])
AC_TRY_LINK([], [
- int HMAC_CTX_init(void);
- (void)HMAC_CTX_init();
+ int HMAC_Update(void);
+ (void)HMAC_Update();
],[
- AC_DEFINE([HAVE_HMAC_CTX_INIT], 1,
- [If you have HMAC_CTX_init])
+ AC_DEFINE([HAVE_HMAC_UPDATE], 1,
+ [If you have HMAC_Update])
AC_MSG_RESULT(yes)
],[
AC_MSG_RESULT(no)
LIBSSL_LIBS="$LIBSSL_LIBS -ldl"
AC_MSG_CHECKING([if -lcrypto needs -ldl])
AC_TRY_LINK([], [
- int HMAC_CTX_init(void);
- (void)HMAC_CTX_init();
+ int HMAC_Update(void);
+ (void)HMAC_Update();
],[
- AC_DEFINE([HAVE_HMAC_CTX_INIT], 1,
- [If you have HMAC_CTX_init])
+ AC_DEFINE([HAVE_HMAC_UPDATE], 1,
+ [If you have HMAC_Update])
AC_MSG_RESULT(yes)
],[
AC_MSG_RESULT(no)
- AC_MSG_ERROR([OpenSSL found in $ssldir, but version 0.9.7 or higher is required])
+ LIBS="$BAKLIBS"
+ LIBSSL_LIBS="$BAKSSLLIBS"
+ LIBS="$LIBS -ldl -pthread"
+ LIBSSL_LIBS="$LIBSSL_LIBS -ldl -pthread"
+ AC_MSG_CHECKING([if -lcrypto needs -ldl -pthread])
+ AC_TRY_LINK([], [
+ int HMAC_Update(void);
+ (void)HMAC_Update();
+ ],[
+ AC_DEFINE([HAVE_HMAC_UPDATE], 1,
+ [If you have HMAC_Update])
+ AC_MSG_RESULT(yes)
+ ],[
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR([OpenSSL found in $ssldir, but version 0.9.7 or higher is required])
+ ])
])
])
])
--- /dev/null
+/*
+ * cachedb/cachedb.c - cache from a database external to the program module
+ *
+ * Copyright (c) 2016, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains a module that uses an external database to cache
+ * dns responses.
+ */
+
+#include "config.h"
+#ifdef USE_CACHEDB
+#include "cachedb/cachedb.h"
+#include "util/regional.h"
+#include "util/net_help.h"
+#include "util/config_file.h"
+#include "util/data/msgreply.h"
+#include "util/data/msgencode.h"
+#include "services/cache/dns.h"
+#include "validator/val_neg.h"
+#include "validator/val_secalgo.h"
+#include "iterator/iter_utils.h"
+#include "sldns/parseutil.h"
+#include "sldns/wire2str.h"
+#include "sldns/sbuffer.h"
+
+#define CACHEDB_HASHSIZE 256 /* bit hash */
+
+/** the unit test testframe for cachedb, its module state contains
+ * a cache for a couple queries (in memory). */
+struct testframe_moddata {
+ /** key for single stored data element, NULL if none */
+ char* stored_key;
+ /** data for single stored data element, NULL if none */
+ uint8_t* stored_data;
+ /** length of stored data */
+ size_t stored_datalen;
+};
+
+static int
+testframe_init(struct module_env* env, struct cachedb_env* cachedb_env)
+{
+ (void)env;
+ verbose(VERB_ALGO, "testframe_init");
+ cachedb_env->backend_data = (void*)calloc(1,
+ sizeof(struct testframe_moddata));
+ if(!cachedb_env->backend_data) {
+ log_err("out of memory");
+ return 0;
+ }
+ return 1;
+}
+
+static void
+testframe_deinit(struct module_env* env, struct cachedb_env* cachedb_env)
+{
+ struct testframe_moddata* d = (struct testframe_moddata*)
+ cachedb_env->backend_data;
+ (void)env;
+ verbose(VERB_ALGO, "testframe_deinit");
+ if(!d)
+ return;
+ free(d->stored_key);
+ free(d->stored_data);
+ free(d);
+}
+
+static int
+testframe_lookup(struct module_env* env, struct cachedb_env* cachedb_env,
+ char* key, struct sldns_buffer* result_buffer)
+{
+ struct testframe_moddata* d = (struct testframe_moddata*)
+ cachedb_env->backend_data;
+ (void)env;
+ verbose(VERB_ALGO, "testframe_lookup of %s", key);
+ if(d->stored_key && strcmp(d->stored_key, key) == 0) {
+ if(d->stored_datalen > sldns_buffer_capacity(result_buffer))
+ return 0; /* too large */
+ verbose(VERB_ALGO, "testframe_lookup found %d bytes",
+ (int)d->stored_datalen);
+ sldns_buffer_clear(result_buffer);
+ sldns_buffer_write(result_buffer, d->stored_data,
+ d->stored_datalen);
+ sldns_buffer_flip(result_buffer);
+ return 1;
+ }
+ return 0;
+}
+
+static void
+testframe_store(struct module_env* env, struct cachedb_env* cachedb_env,
+ char* key, uint8_t* data, size_t data_len)
+{
+ struct testframe_moddata* d = (struct testframe_moddata*)
+ cachedb_env->backend_data;
+ (void)env;
+ verbose(VERB_ALGO, "testframe_store %s (%d bytes)", key, (int)data_len);
+
+ /* free old data element (if any) */
+ free(d->stored_key);
+ d->stored_key = NULL;
+ free(d->stored_data);
+ d->stored_data = NULL;
+ d->stored_datalen = 0;
+
+ d->stored_data = memdup(data, data_len);
+ if(!d->stored_data) {
+ log_err("out of memory");
+ return;
+ }
+ d->stored_datalen = data_len;
+ d->stored_key = strdup(key);
+ if(!d->stored_key) {
+ free(d->stored_data);
+ d->stored_data = NULL;
+ d->stored_datalen = 0;
+ return;
+ }
+ /* (key,data) successfully stored */
+}
+
+/** The testframe backend is for unit tests */
+static struct cachedb_backend testframe_backend = { "testframe",
+ testframe_init, testframe_deinit, testframe_lookup, testframe_store
+};
+
+/** find a particular backend from possible backends */
+static struct cachedb_backend*
+cachedb_find_backend(const char* str)
+{
+ if(strcmp(str, testframe_backend.name) == 0)
+ return &testframe_backend;
+ /* TODO add more backends here */
+ return NULL;
+}
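+
+/*
+ * Illustrative sketch only (not part of this change, names are
+ * hypothetical): an additional backend would plug in by filling the four
+ * cachedb_backend callbacks and adding a branch above, e.g.
+ *
+ *   static struct cachedb_backend mydb_backend = { "mydb",
+ *           mydb_init, mydb_deinit, mydb_lookup, mydb_store };
+ *
+ *   if(strcmp(str, mydb_backend.name) == 0)
+ *           return &mydb_backend;
+ */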
+
+/** apply configuration to cachedb module 'global' state */
+static int
+cachedb_apply_cfg(struct cachedb_env* cachedb_env, struct config_file* cfg)
+{
+ const char* backend_str = "testframe"; /* TODO get from cfg */
+ if(backend_str && backend_str[0]) {
+ cachedb_env->backend = cachedb_find_backend(backend_str);
+ if(!cachedb_env->backend) {
+			log_err("cachedb: cannot find backend name '%s'",
+				backend_str);
+			return 0;
+ }
+ }
+ /* TODO see if more configuration needs to be applied or not */
+ return 1;
+}
+
+int
+cachedb_init(struct module_env* env, int id)
+{
+ struct cachedb_env* cachedb_env = (struct cachedb_env*)calloc(1,
+ sizeof(struct cachedb_env));
+ if(!cachedb_env) {
+ log_err("malloc failure");
+ return 0;
+ }
+ env->modinfo[id] = (void*)cachedb_env;
+ if(!cachedb_apply_cfg(cachedb_env, env->cfg)) {
+ log_err("cachedb: could not apply configuration settings.");
+ return 0;
+ }
+ /* see if a backend is selected */
+ if(!cachedb_env->backend || !cachedb_env->backend->name)
+ return 1;
+ if(!(*cachedb_env->backend->init)(env, cachedb_env)) {
+ log_err("cachedb: could not init %s backend",
+ cachedb_env->backend->name);
+ return 0;
+ }
+ cachedb_env->enabled = 1;
+ return 1;
+}
+
+void
+cachedb_deinit(struct module_env* env, int id)
+{
+ struct cachedb_env* cachedb_env;
+ if(!env || !env->modinfo[id])
+ return;
+ cachedb_env = (struct cachedb_env*)env->modinfo[id];
+ /* free contents */
+ /* TODO */
+ if(cachedb_env->enabled) {
+ (*cachedb_env->backend->deinit)(env, cachedb_env);
+ }
+
+ free(cachedb_env);
+ env->modinfo[id] = NULL;
+}
+
+/** new query for cachedb */
+static int
+cachedb_new(struct module_qstate* qstate, int id)
+{
+ struct cachedb_qstate* iq = (struct cachedb_qstate*)regional_alloc(
+ qstate->region, sizeof(struct cachedb_qstate));
+ qstate->minfo[id] = iq;
+ if(!iq)
+ return 0;
+ memset(iq, 0, sizeof(*iq));
+ /* initialise it */
+ /* TODO */
+
+ return 1;
+}
+
+/**
+ * Return an error
+ * @param qstate: our query state
+ * @param id: module id
+ * @param rcode: error code (DNS errcode).
+ * @return: 0 for use by caller, to make notation easy, like:
+ * return error_response(..).
+ */
+static int
+error_response(struct module_qstate* qstate, int id, int rcode)
+{
+ verbose(VERB_QUERY, "return error response %s",
+ sldns_lookup_by_id(sldns_rcodes, rcode)?
+ sldns_lookup_by_id(sldns_rcodes, rcode)->name:"??");
+ qstate->return_rcode = rcode;
+ qstate->return_msg = NULL;
+ qstate->ext_state[id] = module_finished;
+ return 0;
+}
+
+/**
+ * Hash the query name, type, class and db-access secret into the lookup buffer.
+ * @param qstate: query state with query info
+ * and env->cfg with secret.
+ * @param buf: returned buffer with hash to lookup
+ * @param len: length of the buffer.
+ */
+static void
+calc_hash(struct module_qstate* qstate, char* buf, size_t len)
+{
+ uint8_t clear[1024];
+ size_t clen = 0;
+ uint8_t hash[CACHEDB_HASHSIZE/8];
+ const char* hex = "0123456789ABCDEF";
+ const char* secret = "default"; /* TODO: from qstate->env->cfg */
+ size_t i;
+
+ /* copy the hash info into the clear buffer */
+ if(clen + qstate->qinfo.qname_len < sizeof(clear)) {
+ memmove(clear+clen, qstate->qinfo.qname,
+ qstate->qinfo.qname_len);
+ clen += qstate->qinfo.qname_len;
+ }
+ if(clen + 4 < sizeof(clear)) {
+ uint16_t t = htons(qstate->qinfo.qtype);
+ uint16_t c = htons(qstate->qinfo.qclass);
+ memmove(clear+clen, &t, 2);
+ memmove(clear+clen+2, &c, 2);
+ clen += 4;
+ }
+ if(secret && secret[0] && clen + strlen(secret) < sizeof(clear)) {
+ memmove(clear+clen, secret, strlen(secret));
+ clen += strlen(secret);
+ }
+
+ /* hash the buffer */
+ secalgo_hash_sha256(clear, clen, hash);
+ memset(clear, 0, clen);
+
+ /* hex encode output for portability (some online dbs need
+ * no nulls, no control characters, and so on) */
+ log_assert(len >= sizeof(hash)*2 + 1);
+ (void)len;
+ for(i=0; i<sizeof(hash); i++) {
+ buf[i*2] = hex[(hash[i]&0xf0)>>4];
+ buf[i*2+1] = hex[hash[i]&0x0f];
+ }
+ buf[sizeof(hash)*2] = 0;
+}
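+
+/*
+ * For reference, the key computed above is
+ *   hex( SHA-256( qname wireformat | qtype (2 bytes, network order) |
+ *                 qclass (2 bytes, network order) | secret ) )
+ * with uppercase hex digits, i.e. a fixed 64-character string plus NUL
+ * (CACHEDB_HASHSIZE/8 = 32 bytes of hash).
+ */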
+
+/** convert data from return_msg into the data buffer */
+static int
+prep_data(struct module_qstate* qstate, struct sldns_buffer* buf)
+{
+ uint64_t timestamp, expiry;
+ size_t oldlim;
+ struct edns_data edns;
+ memset(&edns, 0, sizeof(edns));
+ edns.edns_present = 1;
+ edns.bits = EDNS_DO;
+ edns.ext_rcode = 0;
+ edns.edns_version = EDNS_ADVERTISED_VERSION;
+ edns.udp_size = EDNS_ADVERTISED_SIZE;
+
+ if(!qstate->return_msg || !qstate->return_msg->rep)
+ return 0;
+ if(verbosity >= VERB_ALGO)
+ log_dns_msg("cachedb encoding", &qstate->return_msg->qinfo,
+ qstate->return_msg->rep);
+ if(!reply_info_answer_encode(&qstate->return_msg->qinfo,
+ qstate->return_msg->rep, 0, qstate->query_flags,
+ buf, 0, 1, qstate->env->scratch, 65535, &edns, 1, 0))
+ return 0;
+
+ /* TTLs in the return_msg are relative to time(0) so we have to
+ * store that, we also store the smallest ttl in the packet+time(0)
+ * as the packet expiry time */
+ /* qstate->return_msg->rep->ttl contains that relative shortest ttl */
+ timestamp = (uint64_t)*qstate->env->now;
+ expiry = timestamp + (uint64_t)qstate->return_msg->rep->ttl;
+ timestamp = htobe64(timestamp);
+ expiry = htobe64(expiry);
+ oldlim = sldns_buffer_limit(buf);
+ if(oldlim + sizeof(timestamp)+sizeof(expiry) >=
+ sldns_buffer_capacity(buf))
+ return 0; /* doesn't fit. */
+ sldns_buffer_set_limit(buf, oldlim + sizeof(timestamp)+sizeof(expiry));
+	sldns_buffer_write_at(buf, oldlim, &timestamp, sizeof(timestamp));
+ sldns_buffer_write_at(buf, oldlim+sizeof(timestamp), &expiry,
+ sizeof(expiry));
+
+ return 1;
+}
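+
+/*
+ * For reference, the record that prep_data() produces (and that
+ * good_expiry_and_qinfo()/parse_data() consume) is laid out as:
+ *
+ *   [ DNS reply message in wire format                    ]
+ *   [ timestamp: 8 bytes, big endian, seconds since epoch ]
+ *   [ expiry:    8 bytes, big endian, seconds since epoch ]
+ *
+ * The two trailing integers are read from and stripped off the end of
+ * the buffer before the message itself is parsed.
+ */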
+
+/** check expiry, return true if matches OK */
+static int
+good_expiry_and_qinfo(struct module_qstate* qstate, struct sldns_buffer* buf)
+{
+ uint64_t expiry;
+ /* the expiry time is the last bytes of the buffer */
+ if(sldns_buffer_limit(buf) < sizeof(expiry))
+ return 0;
+ sldns_buffer_read_at(buf, sldns_buffer_limit(buf)-sizeof(expiry),
+ &expiry, sizeof(expiry));
+ expiry = be64toh(expiry);
+
+ if((time_t)expiry < *qstate->env->now)
+ return 0;
+
+ return 1;
+}
+
+/** convert dns message in buffer to return_msg */
+static int
+parse_data(struct module_qstate* qstate, struct sldns_buffer* buf)
+{
+ struct msg_parse* prs;
+ struct edns_data edns;
+ uint64_t timestamp, expiry;
+ time_t adjust;
+ size_t lim = sldns_buffer_limit(buf);
+ if(lim < LDNS_HEADER_SIZE+sizeof(timestamp)+sizeof(expiry))
+ return 0; /* too short */
+
+ /* remove timestamp and expiry from end */
+ sldns_buffer_read_at(buf, lim-sizeof(expiry), &expiry, sizeof(expiry));
+ sldns_buffer_read_at(buf, lim-sizeof(expiry)-sizeof(timestamp),
+		&timestamp, sizeof(timestamp));
+ expiry = be64toh(expiry);
+ timestamp = be64toh(timestamp);
+
+ /* parse DNS packet */
+ regional_free_all(qstate->env->scratch);
+ prs = (struct msg_parse*)regional_alloc(qstate->env->scratch,
+ sizeof(struct msg_parse));
+ if(!prs)
+ return 0; /* out of memory */
+ memset(prs, 0, sizeof(*prs));
+ memset(&edns, 0, sizeof(edns));
+ sldns_buffer_set_limit(buf, lim - sizeof(expiry)-sizeof(timestamp));
+ if(parse_packet(buf, prs, qstate->env->scratch) != LDNS_RCODE_NOERROR) {
+ sldns_buffer_set_limit(buf, lim);
+ return 0;
+ }
+ if(parse_extract_edns(prs, &edns, qstate->env->scratch) !=
+ LDNS_RCODE_NOERROR) {
+ sldns_buffer_set_limit(buf, lim);
+ return 0;
+ }
+
+ qstate->return_msg = dns_alloc_msg(buf, prs, qstate->region);
+ sldns_buffer_set_limit(buf, lim);
+ if(!qstate->return_msg)
+ return 0;
+
+ qstate->return_rcode = LDNS_RCODE_NOERROR;
+
+ /* see how much of the TTL expired, and remove it */
+ adjust = *qstate->env->now - (time_t)timestamp;
+ verbose(VERB_ALGO, "cachedb msg adjusted down by %d", (int)adjust);
+ /*adjust_msg(qstate->return_msg, adjust);*/
+ /* TODO:
+ msg->rep->ttl = r->ttl - adjust;
+ msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
+ for(i=0; i<d->count + d->rrsig_count; i++) {
+ if(d->rr_ttl[i] < adjust)
+ d->rr_ttl[i] = 0;
+ else d->rr_ttl[i] -= adjust;
+ }
+ if(d->ttl < adjust)
+ d->ttl = 0;
+ else d->ttl -= adjust;
+ */
+ /* TODO */
+
+ return 0;
+}
+
+/**
+ * Lookup the qstate.qinfo in extcache, store in qstate.return_msg.
+ * return true if lookup was successful.
+ */
+static int
+cachedb_extcache_lookup(struct module_qstate* qstate, struct cachedb_env* ie)
+{
+ char key[(CACHEDB_HASHSIZE/8)*2+1];
+ calc_hash(qstate, key, sizeof(key));
+
+ /* call backend to fetch data for key into scratch buffer */
+ if( !(*ie->backend->lookup)(qstate->env, ie, key,
+ qstate->env->scratch_buffer)) {
+ return 0;
+ }
+
+ /* check expiry date and check if query-data matches */
+ if( !good_expiry_and_qinfo(qstate, qstate->env->scratch_buffer) ) {
+ return 0;
+ }
+
+ /* parse dns message into return_msg */
+ if( !parse_data(qstate, qstate->env->scratch_buffer) ) {
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Store the qstate.return_msg in extcache for key qstate.info
+ */
+static void
+cachedb_extcache_store(struct module_qstate* qstate, struct cachedb_env* ie)
+{
+ char key[(CACHEDB_HASHSIZE/8)*2+1];
+ calc_hash(qstate, key, sizeof(key));
+
+ /* prepare data in scratch buffer */
+ if(!prep_data(qstate, qstate->env->scratch_buffer))
+ return;
+
+ /* call backend */
+ (*ie->backend->store)(qstate->env, ie, key,
+ sldns_buffer_begin(qstate->env->scratch_buffer),
+ sldns_buffer_limit(qstate->env->scratch_buffer));
+}
+
+/**
+ * See if unbound's internal cache can answer the query
+ */
+static int
+cachedb_intcache_lookup(struct module_qstate* qstate)
+{
+ struct dns_msg* msg;
+ msg = dns_cache_lookup(qstate->env, qstate->qinfo.qname,
+ qstate->qinfo.qname_len, qstate->qinfo.qtype,
+ qstate->qinfo.qclass, qstate->query_flags,
+ qstate->region, qstate->env->scratch);
+ if(!msg && qstate->env->neg_cache) {
+ /* lookup in negative cache; may result in
+ * NOERROR/NODATA or NXDOMAIN answers that need validation */
+ msg = val_neg_getmsg(qstate->env->neg_cache, &qstate->qinfo,
+ qstate->region, qstate->env->rrset_cache,
+ qstate->env->scratch_buffer,
+ *qstate->env->now, 1/*add SOA*/, NULL);
+ }
+ if(!msg)
+ return 0;
+ /* this is the returned msg */
+ qstate->return_rcode = LDNS_RCODE_NOERROR;
+ qstate->return_msg = msg;
+ return 1;
+}
+
+/**
+ * Store query into the internal cache of unbound.
+ */
+static void
+cachedb_intcache_store(struct module_qstate* qstate)
+{
+ if(!qstate->return_msg)
+ return;
+ (void)dns_cache_store(qstate->env, &qstate->qinfo,
+ qstate->return_msg->rep, 0, qstate->prefetch_leeway, 0,
+ qstate->region, qstate->query_flags);
+}
+
+/**
+ * Handle a cachedb module event with a query
+ * @param qstate: query state (from the mesh), passed between modules.
+ * contains qstate->env module environment with global caches and so on.
+ * @param iq: query state specific for this module. per-query.
+ * @param ie: environment specific for this module. global.
+ * @param id: module id.
+ */
+static void
+cachedb_handle_query(struct module_qstate* qstate,
+ struct cachedb_qstate* ATTR_UNUSED(iq),
+ struct cachedb_env* ie, int id)
+{
+	/* check if we are enabled, and skip if not */
+ if(!ie->enabled) {
+ /* pass request to next module */
+ qstate->ext_state[id] = module_wait_module;
+ return;
+ }
+
+ if(qstate->blacklist) {
+ /* cache is blacklisted */
+ /* pass request to next module */
+ qstate->ext_state[id] = module_wait_module;
+ return;
+ }
+
+ /* lookup inside unbound's internal cache */
+ if(cachedb_intcache_lookup(qstate)) {
+ if(verbosity >= VERB_ALGO)
+ log_dns_msg("cachedb internal cache lookup",
+ &qstate->return_msg->qinfo,
+ qstate->return_msg->rep);
+ /* we are done with the query */
+ qstate->ext_state[id] = module_finished;
+ return;
+ }
+
+ /* ask backend cache to see if we have data */
+ if(cachedb_extcache_lookup(qstate, ie)) {
+ if(verbosity >= VERB_ALGO)
+ log_dns_msg(ie->backend->name,
+ &qstate->return_msg->qinfo,
+ qstate->return_msg->rep);
+ /* store this result in internal cache */
+ cachedb_intcache_store(qstate);
+ /* we are done with the query */
+ qstate->ext_state[id] = module_finished;
+ return;
+ }
+
+ /* no cache fetches */
+ /* pass request to next module */
+ qstate->ext_state[id] = module_wait_module;
+}
+
+/**
+ * Handle a cachedb module event with a response from the iterator.
+ * @param qstate: query state (from the mesh), passed between modules.
+ * contains qstate->env module environment with global caches and so on.
+ * @param iq: query state specific for this module. per-query.
+ * @param ie: environment specific for this module. global.
+ * @param id: module id.
+ */
+static void
+cachedb_handle_response(struct module_qstate* qstate,
+ struct cachedb_qstate* ATTR_UNUSED(iq), struct cachedb_env* ie, int id)
+{
+ /* check if we are enabled, and skip if not */
+ if(!ie->enabled) {
+ /* we are done with the query */
+ qstate->ext_state[id] = module_finished;
+ return;
+ }
+
+ /* store the item into the backend cache */
+ cachedb_extcache_store(qstate, ie);
+
+ /* we are done with the query */
+ qstate->ext_state[id] = module_finished;
+}
+
+void
+cachedb_operate(struct module_qstate* qstate, enum module_ev event, int id,
+ struct outbound_entry* outbound)
+{
+ struct cachedb_env* ie = (struct cachedb_env*)qstate->env->modinfo[id];
+ struct cachedb_qstate* iq = (struct cachedb_qstate*)qstate->minfo[id];
+ verbose(VERB_QUERY, "cachedb[module %d] operate: extstate:%s event:%s",
+ id, strextstate(qstate->ext_state[id]), strmodulevent(event));
+ if(iq) log_query_info(VERB_QUERY, "cachedb operate: query",
+ &qstate->qinfo);
+
+ /* perform cachedb state machine */
+ if((event == module_event_new || event == module_event_pass) &&
+ iq == NULL) {
+ if(!cachedb_new(qstate, id)) {
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ return;
+ }
+ iq = (struct cachedb_qstate*)qstate->minfo[id];
+ }
+ if(iq && (event == module_event_pass || event == module_event_new)) {
+ cachedb_handle_query(qstate, iq, ie, id);
+ return;
+ }
+ if(iq && (event == module_event_moddone)) {
+ cachedb_handle_response(qstate, iq, ie, id);
+ return;
+ }
+ if(iq && outbound) {
+ /* cachedb does not need to process responses at this time
+ * ignore it.
+ cachedb_process_response(qstate, iq, ie, id, outbound, event);
+ */
+ return;
+ }
+ if(event == module_event_error) {
+ verbose(VERB_ALGO, "got called with event error, giving up");
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ return;
+ }
+
+ log_err("bad event for cachedb");
+ (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+}
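+
+/*
+ * Summary of the flow above: on a new or passed query the module first
+ * tries the internal and external caches (cachedb_handle_query); if both
+ * miss, the query is handed on with module_wait_module.  Once the later
+ * modules report back (module_event_moddone), cachedb_handle_response
+ * stores the answer through the backend (when one is enabled) and the
+ * module finishes.
+ */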
+
+void
+cachedb_inform_super(struct module_qstate* ATTR_UNUSED(qstate),
+ int ATTR_UNUSED(id), struct module_qstate* ATTR_UNUSED(super))
+{
+ /* cachedb does not use subordinate requests at this time */
+ verbose(VERB_ALGO, "cachedb inform_super was called");
+}
+
+void
+cachedb_clear(struct module_qstate* qstate, int id)
+{
+ struct cachedb_qstate* iq;
+ if(!qstate)
+ return;
+ iq = (struct cachedb_qstate*)qstate->minfo[id];
+ if(iq) {
+ /* free contents of iq */
+ /* TODO */
+ }
+ qstate->minfo[id] = NULL;
+}
+
+size_t
+cachedb_get_mem(struct module_env* env, int id)
+{
+ struct cachedb_env* ie = (struct cachedb_env*)env->modinfo[id];
+ if(!ie)
+ return 0;
+ return sizeof(*ie); /* TODO - more mem */
+}
+
+/**
+ * The cachedb function block
+ */
+static struct module_func_block cachedb_block = {
+ "cachedb",
+ &cachedb_init, &cachedb_deinit, &cachedb_operate,
+ &cachedb_inform_super, &cachedb_clear, &cachedb_get_mem
+};
+
+struct module_func_block*
+cachedb_get_funcblock(void)
+{
+ return &cachedb_block;
+}
+#endif /* USE_CACHEDB */
--- /dev/null
+/*
+ * cachedb/cachedb.h - cache from a database external to the program module
+ *
+ * Copyright (c) 2016, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains a module that uses an external database to cache
+ * dns responses.
+ */
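+
+/*
+ * Build note: the cachedb code is compiled only when USE_CACHEDB is
+ * defined, which happens when unbound is configured with --enable-cachedb.
+ */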
+#include "util/module.h"
+struct cachedb_backend;
+
+/**
+ * The global variable environment contents for the cachedb
+ * Shared between threads, this represents long term information.
+ * Like database connections.
+ */
+struct cachedb_env {
+	/** true if cachedb is enabled, the backend is turned on */
+ int enabled;
+
+ /** the backend routines */
+ struct cachedb_backend* backend;
+
+ /** backend specific data here */
+ void* backend_data;
+};
+
+/**
+ * Per query state for the cachedb module.
+ */
+struct cachedb_qstate {
+ int todo;
+};
+
+/**
+ * Backend call routines
+ */
+struct cachedb_backend {
+ /** backend name */
+ const char* name;
+
+ /** Init(env, cachedb_env): false on setup failure */
+ int (*init)(struct module_env*, struct cachedb_env*);
+
+ /** Deinit - close db for program exit */
+ void (*deinit)(struct module_env*, struct cachedb_env*);
+
+ /** Lookup (env, cachedb_env, key, result_buffer): true if found */
+ int (*lookup)(struct module_env*, struct cachedb_env*, char*,
+ struct sldns_buffer*);
+
+ /** Store (env, cachedb_env, key, data, data_len) */
+ void (*store)(struct module_env*, struct cachedb_env*, char*,
+ uint8_t*, size_t);
+};
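+
+/*
+ * Calling convention, as used by cachedb.c: lookup() gets the hex key and,
+ * on a hit, writes the stored blob into the supplied buffer (clear, write,
+ * flip) and returns 1; it returns 0 on a miss or error.  store() gets the
+ * serialized reply (wire message plus trailing timestamp and expiry);
+ * store failures are not fatal, the entry is simply not cached.  See the
+ * testframe backend in cachedb.c for a minimal in-memory example.
+ */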
+
+/** Init the cachedb module */
+int cachedb_init(struct module_env* env, int id);
+/** Deinit the cachedb module */
+void cachedb_deinit(struct module_env* env, int id);
+/** Operate on an event on a query (in qstate). */
+void cachedb_operate(struct module_qstate* qstate, enum module_ev event,
+ int id, struct outbound_entry* outbound);
+/** Subordinate query done, inform this super request of its conclusion */
+void cachedb_inform_super(struct module_qstate* qstate, int id,
+ struct module_qstate* super);
+/** clear the cachedb query-specific contents out of qstate */
+void cachedb_clear(struct module_qstate* qstate, int id);
+/** return memory estimate for cachedb module */
+size_t cachedb_get_mem(struct module_env* env, int id);
+
+/**
+ * Get the function block with pointers to the cachedb functions
+ * @return the function block for "cachedb".
+ */
+struct module_func_block* cachedb_get_funcblock(void);
+
+#endif /* CACHEDB_CACHEDB_H */
/* Whether the C compiler accepts the "unused" attribute */
#undef HAVE_ATTR_UNUSED
+/* Whether the C compiler accepts the "weak" attribute */
+#undef HAVE_ATTR_WEAK
+
/* Define to 1 if you have the `chown' function. */
#undef HAVE_CHOWN
/* Define to 1 if you have the <event.h> header file. */
#undef HAVE_EVENT_H
+/* Define to 1 if you have the `EVP_MD_CTX_new' function. */
+#undef HAVE_EVP_MD_CTX_NEW
+
/* Define to 1 if you have the `EVP_sha1' function. */
#undef HAVE_EVP_SHA1
/* Define to 1 if you have the <grp.h> header file. */
#undef HAVE_GRP_H
-/* If you have HMAC_CTX_init */
-#undef HAVE_HMAC_CTX_INIT
+/* If you have HMAC_Update */
+#undef HAVE_HMAC_UPDATE
/* Define to 1 if you have the `inet_aton' function. */
#undef HAVE_INET_ATON
/* Define to 1 if you have the `strptime' function. */
#undef HAVE_STRPTIME
+/* Define to 1 if you have the `strsep' function. */
+#undef HAVE_STRSEP
+
/* Define to 1 if `ipi_spec_dst' is a member of `struct in_pktinfo'. */
#undef HAVE_STRUCT_IN_PKTINFO_IPI_SPEC_DST
/* define this to enable debug checks. */
#undef UNBOUND_DEBUG
+/* Define to 1 to use cachedb support */
+#undef USE_CACHEDB
+
/* Define to 1 to enable dnstap support */
#undef USE_DNSTAP
+/* Define this to enable DSA support. */
+#undef USE_DSA
+
/* Define this to enable ECDSA support. */
#undef USE_ECDSA
char *ctime_r(const time_t *timep, char *buf);
#endif
+#ifndef HAVE_STRSEP
+#define strsep unbound_strsep
+char *strsep(char **stringp, const char *delim);
+#endif
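+/* The replacement in compat/strsep.c is assumed to follow strsep(3)
+ * semantics: strsep(&p, ",") on p = "a,,b" yields "a", "" and "b",
+ * advancing p past each delimiter and returning NULL once the string is
+ * exhausted. */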
+
#ifndef HAVE_ISBLANK
#define isblank unbound_isblank
int isblank(int c);
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for unbound 1.5.8.
+# Generated by GNU Autoconf 2.69 for unbound 1.5.9.
#
# Report bugs to <unbound-bugs@nlnetlabs.nl>.
#
# Identity of this package.
PACKAGE_NAME='unbound'
PACKAGE_TARNAME='unbound'
-PACKAGE_VERSION='1.5.8'
-PACKAGE_STRING='unbound 1.5.8'
+PACKAGE_VERSION='1.5.9'
+PACKAGE_STRING='unbound 1.5.9'
PACKAGE_BUGREPORT='unbound-bugs@nlnetlabs.nl'
PACKAGE_URL=''
enable_sha2
enable_gost
enable_ecdsa
+enable_dsa
enable_event_api
with_libevent
with_libexpat
with_dnstap_socket_path
with_protobuf_c
with_libfstrm
+enable_cachedb
with_libunbound_only
'
ac_precious_vars='build_alias
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures unbound 1.5.8 to adapt to many kinds of systems.
+\`configure' configures unbound 1.5.9 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of unbound 1.5.8:";;
+ short | recursive ) echo "Configuration of unbound 1.5.9:";;
esac
cat <<\_ACEOF
--disable-sha2 Disable SHA256 and SHA512 RRSIG support
--disable-gost Disable GOST support
--disable-ecdsa Disable ECDSA support
- --enable-event-api Enable (experimental) libevent-based libunbound API
- installed to unbound-event.h
+ --disable-dsa Disable DSA support
+ --enable-event-api Enable (experimental) pluggable event base
+ libunbound API installed to unbound-event.h
--enable-static-exe enable to compile executables statically against
(event) libs, for debug purposes
--enable-lock-checks enable to check lock and unlock calls, for debug
to it, smaller install size but libunbound export
table is polluted by internal symbols
--enable-dnstap Enable dnstap support (requires fstrm, protobuf-c)
+ --enable-cachedb enable cachedb module that can use external cache
+ storage
Optional Packages:
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-unbound configure 1.5.8
+unbound configure 1.5.9
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by unbound $as_me 1.5.8, which was
+It was created by unbound $as_me 1.5.9, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
UNBOUND_VERSION_MINOR=5
-UNBOUND_VERSION_MICRO=8
+UNBOUND_VERSION_MICRO=9
LIBUNBOUND_CURRENT=6
-LIBUNBOUND_REVISION=0
+LIBUNBOUND_REVISION=1
LIBUNBOUND_AGE=4
# 1.0.0 had 0:12:0
# 1.0.1 had 0:13:0
# 1.5.6 had 5:9:3
# 1.5.7 had 5:10:3
# 1.5.8 had 6:0:4 # adds ub_ctx_set_stub
+# 1.5.9 had 6:1:4
# Current -- the number of the binary API that we're implementing
# Revision -- which iteration of the implementation of the binary
fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler (${CC-cc}) accepts the \"weak\" attribute" >&5
+$as_echo_n "checking whether the C compiler (${CC-cc}) accepts the \"weak\" attribute... " >&6; }
+if ${ac_cv_c_weak_attribute+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_weak_attribute=no
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+ #include <stdio.h>
+__attribute__((weak)) void f(int x) { printf("%d", x); }
+
+int
+main ()
+{
+
+ f(1);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_c_weak_attribute="yes"
+else
+ ac_cv_c_weak_attribute="no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_weak_attribute" >&5
+$as_echo "$ac_cv_c_weak_attribute" >&6; }
+if test $ac_cv_c_weak_attribute = yes; then
+
+$as_echo "#define HAVE_ATTR_WEAK 1" >>confdefs.h
+
+fi
+
+
if test "$srcdir" != "."; then
CPPFLAGS="$CPPFLAGS -I$srcdir"
fi
fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for HMAC_CTX_init in -lcrypto" >&5
-$as_echo_n "checking for HMAC_CTX_init in -lcrypto... " >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for HMAC_Update in -lcrypto" >&5
+$as_echo_n "checking for HMAC_Update in -lcrypto... " >&6; }
LIBS="$LIBS -lcrypto"
LIBSSL_LIBS="$LIBSSL_LIBS -lcrypto"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
main ()
{
- int HMAC_CTX_init(void);
- (void)HMAC_CTX_init();
+ int HMAC_Update(void);
+ (void)HMAC_Update();
;
return 0;
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
-$as_echo "#define HAVE_HMAC_CTX_INIT 1" >>confdefs.h
+$as_echo "#define HAVE_HMAC_UPDATE 1" >>confdefs.h
else
main ()
{
- int HMAC_CTX_init(void);
- (void)HMAC_CTX_init();
+ int HMAC_Update(void);
+ (void)HMAC_Update();
;
return 0;
if ac_fn_c_try_link "$LINENO"; then :
-$as_echo "#define HAVE_HMAC_CTX_INIT 1" >>confdefs.h
+$as_echo "#define HAVE_HMAC_UPDATE 1" >>confdefs.h
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
main ()
{
- int HMAC_CTX_init(void);
- (void)HMAC_CTX_init();
+ int HMAC_Update(void);
+ (void)HMAC_Update();
;
return 0;
if ac_fn_c_try_link "$LINENO"; then :
-$as_echo "#define HAVE_HMAC_CTX_INIT 1" >>confdefs.h
+$as_echo "#define HAVE_HMAC_UPDATE 1" >>confdefs.h
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
- as_fn_error $? "OpenSSL found in $ssldir, but version 0.9.7 or higher is required" "$LINENO" 5
+ LIBS="$BAKLIBS"
+ LIBSSL_LIBS="$BAKSSLLIBS"
+ LIBS="$LIBS -ldl -pthread"
+ LIBSSL_LIBS="$LIBSSL_LIBS -ldl -pthread"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if -lcrypto needs -ldl -pthread" >&5
+$as_echo_n "checking if -lcrypto needs -ldl -pthread... " >&6; }
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ int HMAC_Update(void);
+ (void)HMAC_Update();
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+
+
+$as_echo "#define HAVE_HMAC_UPDATE 1" >>confdefs.h
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+else
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ as_fn_error $? "OpenSSL found in $ssldir, but version 0.9.7 or higher is required" "$LINENO" 5
+
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
fi
rm -f core conftest.err conftest.$ac_objext \
done
-for ac_func in OPENSSL_config EVP_sha1 EVP_sha256 EVP_sha512 FIPS_mode
+for ac_func in OPENSSL_config EVP_sha1 EVP_sha256 EVP_sha512 FIPS_mode EVP_MD_CTX_new
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
;;
esac
+# Check whether --enable-dsa was given.
+if test "${enable_dsa+set}" = set; then :
+ enableval=$enable_dsa;
+fi
+
+use_dsa="no"
+case "$enable_ecdsa" in
+ no)
+ ;;
+ *)
+ # detect if DSA is supported, and turn it off if not.
+ ac_fn_c_check_func "$LINENO" "EVP_dss1" "ac_cv_func_EVP_dss1"
+if test "x$ac_cv_func_EVP_dss1" = xyes; then :
+
+
+cat >>confdefs.h <<_ACEOF
+#define USE_DSA 1
+_ACEOF
+
+
+else
+ if test "x$enable_dsa" = "xyes"; then as_fn_error $? "OpenSSL does not support DSA and you used --enable-dsa." "$LINENO" 5
+ fi
+fi
+
+ ;;
+esac
+
+
# Check whether --enable-event-api was given.
if test "${enable_event_api+set}" = set; then :
enableval=$enable_event_api;
fi
-use_unbound_event="no"
case "$enable_event_api" in
yes)
- use_unbound_event="yes"
+ UNBOUND_EVENT_INSTALL=unbound-event-install
+
+ UNBOUND_EVENT_UNINSTALL=unbound-event-uninstall
+
;;
*)
;;
if test -n "$BAK_LDFLAGS_SET"; then
LDFLAGS="$BAK_LDFLAGS"
fi
- if test "$use_unbound_event" = "yes"; then
- UNBOUND_EVENT_INSTALL=unbound-event-install
-
- UNBOUND_EVENT_UNINSTALL=unbound-event-uninstall
-
- fi
else
$as_echo "#define USE_MINI_EVENT 1" >>confdefs.h
fi
+ac_fn_c_check_func "$LINENO" "strsep" "ac_cv_func_strsep"
+if test "x$ac_cv_func_strsep" = xyes; then :
+ $as_echo "#define HAVE_STRSEP 1" >>confdefs.h
+
+else
+ case " $LIBOBJS " in
+ *" strsep.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS strsep.$ac_objext"
+ ;;
+esac
+
+fi
+
+
# Check whether --enable-allsymbols was given.
if test "${enable_allsymbols+set}" = set; then :
fi
+# check for cachedb if requested
+# Check whether --enable-cachedb was given.
+if test "${enable_cachedb+set}" = set; then :
+ enableval=$enable_cachedb;
+fi
+
+case "$enable_cachedb" in
+ yes)
+
+$as_echo "#define USE_CACHEDB 1" >>confdefs.h
+
+ ;;
+ no|*)
+ # nothing
+ ;;
+esac
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if ${MAKE:-make} supports $< with implicit rule in scope" >&5
$as_echo_n "checking if ${MAKE:-make} supports $< with implicit rule in scope... " >&6; }
# on openBSD, the implicit rule make $< work.
-version=1.5.8
+version=1.5.9
date=`date +'%b %e, %Y'`
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by unbound $as_me 1.5.8, which was
+This file was extended by unbound $as_me 1.5.9, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-unbound config.status 1.5.8
+unbound config.status 1.5.9
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
# must be numbers. ac_defun because of later processing
m4_define([VERSION_MAJOR],[1])
m4_define([VERSION_MINOR],[5])
-m4_define([VERSION_MICRO],[8])
+m4_define([VERSION_MICRO],[9])
AC_INIT(unbound, m4_defn([VERSION_MAJOR]).m4_defn([VERSION_MINOR]).m4_defn([VERSION_MICRO]), unbound-bugs@nlnetlabs.nl, unbound)
AC_SUBST(UNBOUND_VERSION_MAJOR, [VERSION_MAJOR])
AC_SUBST(UNBOUND_VERSION_MINOR, [VERSION_MINOR])
AC_SUBST(UNBOUND_VERSION_MICRO, [VERSION_MICRO])
LIBUNBOUND_CURRENT=6
-LIBUNBOUND_REVISION=0
+LIBUNBOUND_REVISION=1
LIBUNBOUND_AGE=4
# 1.0.0 had 0:12:0
# 1.0.1 had 0:13:0
# 1.5.6 had 5:9:3
# 1.5.7 had 5:10:3
# 1.5.8 had 6:0:4 # adds ub_ctx_set_stub
+# 1.5.9 had 6:1:4
# Current -- the number of the binary API that we're implementing
# Revision -- which iteration of the implementation of the binary
ACX_CHECK_FORMAT_ATTRIBUTE
ACX_CHECK_UNUSED_ATTRIBUTE
+AC_DEFUN([CHECK_WEAK_ATTRIBUTE],
+[AC_REQUIRE([AC_PROG_CC])
+AC_MSG_CHECKING(whether the C compiler (${CC-cc}) accepts the "weak" attribute)
+AC_CACHE_VAL(ac_cv_c_weak_attribute,
+[ac_cv_c_weak_attribute=no
+AC_TRY_COMPILE(
+[ #include <stdio.h>
+__attribute__((weak)) void f(int x) { printf("%d", x); }
+], [
+ f(1);
+],
+[ac_cv_c_weak_attribute="yes"],
+[ac_cv_c_weak_attribute="no"])
+])
+
+AC_MSG_RESULT($ac_cv_c_weak_attribute)
+if test $ac_cv_c_weak_attribute = yes; then
+ AC_DEFINE(HAVE_ATTR_WEAK, 1, [Whether the C compiler accepts the "weak" attribute])
+fi
+])dnl End of CHECK_WEAK_ATTRIBUTE
+
+CHECK_WEAK_ATTRIBUTE
+
if test "$srcdir" != "."; then
CPPFLAGS="$CPPFLAGS -I$srcdir"
fi
fi
AC_CHECK_HEADERS([openssl/conf.h],,, [AC_INCLUDES_DEFAULT])
AC_CHECK_HEADERS([openssl/engine.h],,, [AC_INCLUDES_DEFAULT])
-AC_CHECK_FUNCS([OPENSSL_config EVP_sha1 EVP_sha256 EVP_sha512 FIPS_mode])
+AC_CHECK_FUNCS([OPENSSL_config EVP_sha1 EVP_sha256 EVP_sha512 FIPS_mode EVP_MD_CTX_new])
AC_CHECK_DECLS([SSL_COMP_get_compression_methods,sk_SSL_COMP_pop_free,SSL_CTX_set_ecdh_auto], [], [], [
AC_INCLUDES_DEFAULT
#ifdef HAVE_OPENSSL_ERR_H
;;
esac
-AC_ARG_ENABLE(event-api, AC_HELP_STRING([--enable-event-api], [Enable (experimental) libevent-based libunbound API installed to unbound-event.h]))
-use_unbound_event="no"
+AC_ARG_ENABLE(dsa, AC_HELP_STRING([--disable-dsa], [Disable DSA support]))
+use_dsa="no"
+case "$enable_ecdsa" in
+ no)
+ ;;
+ *)
+ # detect if DSA is supported, and turn it off if not.
+ AC_CHECK_FUNC(EVP_dss1, [
+ AC_DEFINE_UNQUOTED([USE_DSA], [1], [Define this to enable DSA support.])
+ ], [if test "x$enable_dsa" = "xyes"; then AC_MSG_ERROR([OpenSSL does not support DSA and you used --enable-dsa.])
+ fi ])
+ ;;
+esac
+
+
+AC_ARG_ENABLE(event-api, AC_HELP_STRING([--enable-event-api], [Enable (experimental) pluggable event base libunbound API installed to unbound-event.h]))
case "$enable_event_api" in
yes)
- use_unbound_event="yes"
+ AC_SUBST(UNBOUND_EVENT_INSTALL, [unbound-event-install])
+ AC_SUBST(UNBOUND_EVENT_UNINSTALL, [unbound-event-uninstall])
;;
*)
;;
if test -n "$BAK_LDFLAGS_SET"; then
LDFLAGS="$BAK_LDFLAGS"
fi
- if test "$use_unbound_event" = "yes"; then
- AC_SUBST(UNBOUND_EVENT_INSTALL, [unbound-event-install])
- AC_SUBST(UNBOUND_EVENT_UNINSTALL, [unbound-event-uninstall])
- fi
else
AC_DEFINE(USE_MINI_EVENT, 1, [Define if you want to use internal select based events])
fi
LIBOBJ_WITHOUT_CTIME="$LIBOBJS"
AC_SUBST(LIBOBJ_WITHOUT_CTIME)
AC_REPLACE_FUNCS(ctime_r)
+AC_REPLACE_FUNCS(strsep)
AC_ARG_ENABLE(allsymbols, AC_HELP_STRING([--enable-allsymbols], [export all symbols from libunbound and link binaries to it, smaller install size but libunbound export table is polluted by internal symbols]))
case "$enable_allsymbols" in
]
)
+# check for cachedb if requested
+AC_ARG_ENABLE(cachedb, AC_HELP_STRING([--enable-cachedb], [enable cachedb module that can use external cache storage]))
+case "$enable_cachedb" in
+ yes)
+ AC_DEFINE([USE_CACHEDB], [1], [Define to 1 to use cachedb support])
+ ;;
+ no|*)
+ # nothing
+ ;;
+esac
+
AC_MSG_CHECKING([if ${MAKE:-make} supports $< with implicit rule in scope])
# on openBSD, the implicit rule make $< work.
# on Solaris, it does not work ($? is changed sources, $^ lists dependencies).
char *ctime_r(const time_t *timep, char *buf);
#endif
+#ifndef HAVE_STRSEP
+#define strsep unbound_strsep
+char *strsep(char **stringp, const char *delim);
+#endif
+
#ifndef HAVE_ISBLANK
#define isblank unbound_isblank
int isblank(int c);
#ifdef HAVE_SSL
ERR_load_crypto_strings();
ERR_load_SSL_strings();
-# ifdef HAVE_OPENSSL_CONFIG
- OPENSSL_config("unbound");
-# endif
# ifdef USE_GOST
(void)sldns_key_EVP_load_gost_id();
# endif
# endif
# if HAVE_DECL_SSL_COMP_GET_COMPRESSION_METHODS && HAVE_DECL_SK_SSL_COMP_POP_FREE
# ifndef S_SPLINT_S
+# if OPENSSL_VERSION_NUMBER < 0x10100000
sk_SSL_COMP_pop_free(comp_meth, (void(*)())CRYPTO_free);
+# endif
# endif
# endif
# ifdef HAVE_OPENSSL_CONFIG
EVP_cleanup();
+# if OPENSSL_VERSION_NUMBER < 0x10100000
ENGINE_cleanup();
+# endif
CONF_modules_free();
# endif
CRYPTO_cleanup_all_ex_data(); /* safe, no more threads right now */
- ERR_remove_state(0);
ERR_free_strings();
+# if OPENSSL_VERSION_NUMBER < 0x10100000
RAND_cleanup();
+# endif
# if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED)
ub_openssl_lock_delete();
# endif
#include "util/locks.h"
#include "util/alloc.h"
#include "services/modstack.h"
-#ifdef UB_ON_WINDOWS
-# include "util/winsock_event.h"
-#endif
struct config_file;
struct worker;
struct listen_port;
/*
* The following function was generated using the openssl utility, using
- * the command : "openssl dhparam -dsaparam -C 1024"
+ * the command : "openssl dhparam -C 2048"
* (some openssl versions reject DH that is 'too small', eg. 512).
*/
#ifndef S_SPLINT_S
-DH *get_dh1024()
-{
- static unsigned char dh1024_p[]={
- 0xB3,0x67,0x2E,0x3B,0x68,0xC5,0xDA,0x58,0x46,0xD6,0x2B,0xD3,
- 0x41,0x78,0x97,0xE4,0xE1,0x61,0x71,0x68,0xE6,0x0F,0x1D,0x78,
- 0x05,0xAA,0xF0,0xFF,0x30,0xDF,0xAC,0x49,0x7F,0xE0,0x90,0xFE,
- 0xB9,0x56,0x4E,0x3F,0xE2,0x98,0x8A,0xED,0xF5,0x28,0x39,0xEF,
- 0x2E,0xA6,0xB7,0x67,0xB2,0x43,0xE4,0x53,0xF8,0xEB,0x2C,0x1F,
- 0x06,0x77,0x3A,0x6F,0x62,0x98,0xC1,0x3B,0xF7,0xBA,0x4D,0x93,
- 0xF7,0xEB,0x5A,0xAD,0xC5,0x5F,0xF0,0xB7,0x24,0x35,0x81,0xF7,
- 0x7F,0x1F,0x24,0xC0,0xDF,0xD3,0xD8,0x40,0x72,0x7E,0xF3,0x19,
- 0x2B,0x26,0x27,0xF4,0xB6,0xB3,0xD4,0x7D,0x08,0x23,0xBE,0x68,
- 0x2B,0xCA,0xB4,0x46,0xA8,0x9E,0xDD,0x6C,0x3D,0x75,0xA6,0x48,
- 0xF7,0x44,0x43,0xBF,0x91,0xC2,0xB4,0x49,
+DH *get_dh2048()
+{
+ static unsigned char dh2048_p[]={
+ 0xE7,0x36,0x28,0x3B,0xE4,0xC3,0x32,0x1C,0x01,0xC3,0x67,0xD6,
+ 0xF5,0xF3,0xDA,0xDC,0x71,0xC0,0x42,0x8B,0xE6,0xEB,0x8D,0x80,
+ 0x35,0x7F,0x09,0x45,0x30,0xE5,0xB2,0x92,0x81,0x3F,0x08,0xCD,
+ 0x36,0x5E,0x19,0x83,0x62,0xCC,0xAE,0x9B,0x81,0x66,0x24,0xEE,
+ 0x16,0x6F,0xA9,0x9E,0xF4,0x82,0x1B,0xDD,0x46,0xC7,0x33,0x5D,
+ 0xF4,0xCA,0xE6,0x8F,0xFC,0xD4,0xD8,0x58,0x94,0x24,0x5D,0xFF,
+ 0x0A,0xE8,0xEF,0x3D,0xCE,0xBB,0x50,0x94,0xE0,0x5F,0xE8,0x41,
+ 0xC3,0x35,0x30,0x37,0xD5,0xCB,0x8F,0x3D,0x95,0x15,0x1A,0x77,
+ 0x42,0xB2,0x06,0x86,0xF6,0x09,0x66,0x0E,0x9A,0x25,0x94,0x3E,
+ 0xD2,0x04,0x25,0x25,0x1D,0x23,0xEB,0xDC,0x4D,0x0C,0x83,0x28,
+ 0x2E,0x15,0x81,0x2D,0xC1,0xAF,0x8D,0x36,0x64,0xE3,0x9A,0x83,
+ 0x78,0xC2,0x8D,0xC0,0x9D,0xD9,0x3A,0x1C,0xC5,0x2B,0x50,0x68,
+ 0x07,0xA9,0x4B,0x8C,0x07,0x57,0xD6,0x15,0x03,0x4E,0x9E,0x01,
+ 0xF2,0x6F,0x35,0xAC,0x26,0x9C,0x92,0x68,0x61,0x13,0xFB,0x01,
+ 0xBA,0x22,0x36,0x01,0x55,0xB6,0x62,0xD9,0xB2,0x98,0xCE,0x5D,
+ 0x4B,0xA5,0x41,0xD6,0xE5,0x70,0x78,0x12,0x1F,0x64,0xB6,0x6F,
+ 0xB0,0x91,0x51,0x91,0x92,0xC0,0x94,0x3A,0xD1,0x28,0x4D,0x30,
+ 0x84,0x3E,0xE4,0xE4,0x7F,0x47,0x89,0xB1,0xB6,0x8C,0x8E,0x0E,
+ 0x26,0xDB,0xCD,0x17,0x07,0x2A,0x21,0x7A,0xCC,0x68,0xE8,0x57,
+ 0x94,0x9E,0x59,0x61,0xEC,0x20,0x34,0x26,0x0D,0x66,0x44,0xEB,
+ 0x6F,0x02,0x58,0xE2,0xED,0xF6,0xF3,0x1B,0xBF,0x9E,0x45,0x52,
+ 0x5A,0x49,0xA1,0x5B,
};
- static unsigned char dh1024_g[]={
- 0x5F,0x37,0xB5,0x80,0x4D,0xB4,0xC4,0xB2,0x37,0x12,0xD5,0x2F,
- 0x56,0x81,0xB0,0xDF,0x3D,0x27,0xA2,0x54,0xE7,0x14,0x65,0x2D,
- 0x72,0xA8,0x97,0xE0,0xA9,0x4A,0x09,0x5E,0x89,0xBE,0x34,0x9A,
- 0x90,0x98,0xC1,0xE8,0xBB,0x01,0x2B,0xC2,0x74,0x74,0x90,0x59,
- 0x0B,0x72,0x62,0x5C,0xFD,0x49,0x63,0x4B,0x38,0x91,0xF1,0x7F,
- 0x13,0x25,0xEB,0x52,0x50,0x47,0xA2,0x8C,0x32,0x28,0x42,0xAC,
- 0xBD,0x7A,0xCC,0x58,0xBE,0x36,0xDA,0x6A,0x24,0x06,0xC7,0xF1,
- 0xDA,0x8D,0x8A,0x3B,0x03,0xFA,0x6F,0x25,0xE5,0x20,0xA7,0xD6,
- 0x6F,0x74,0x61,0x53,0x14,0x81,0x29,0x04,0xB5,0x61,0x12,0x53,
- 0xA3,0xD6,0x09,0x98,0x0C,0x8F,0x1C,0xBB,0xD7,0x1C,0x2C,0xEE,
- 0x56,0x4B,0x74,0x8F,0x4A,0xF8,0xA9,0xD5,
+ static unsigned char dh2048_g[]={
+ 0x02,
};
DH *dh;
if ((dh=DH_new()) == NULL) return(NULL);
- dh->p=BN_bin2bn(dh1024_p,sizeof(dh1024_p),NULL);
- dh->g=BN_bin2bn(dh1024_g,sizeof(dh1024_g),NULL);
+ dh->p=BN_bin2bn(dh2048_p,sizeof(dh2048_p),NULL);
+ dh->g=BN_bin2bn(dh2048_g,sizeof(dh2048_g),NULL);
if ((dh->p == NULL) || (dh->g == NULL))
{ DH_free(dh); return(NULL); }
- dh->length = 160;
return(dh);
}
#endif /* SPLINT */
/* Since we have no certificates and hence no source of
* DH params, let's generate and set them
*/
- if(!SSL_CTX_set_tmp_dh(rc->ctx,get_dh1024())) {
+ if(!SSL_CTX_set_tmp_dh(rc->ctx,get_dh2048())) {
log_crypto_err("Wanted to set DH param, but failed");
return NULL;
}
/* open fd */
fd = create_tcp_accept_sock(res, 1, &noproto, 0,
- cfg->ip_transparent, 0);
+ cfg->ip_transparent, 0, cfg->ip_freebind);
freeaddrinfo(res);
}
#include "util/data/msgreply.h"
#include "util/module.h"
#include "util/net_help.h"
+#include "util/ub_event.h"
#include <signal.h>
#include <fcntl.h>
#include <openssl/crypto.h>
#include <login_cap.h>
#endif
-#ifdef USE_MINI_EVENT
-# ifdef USE_WINSOCK
-# include "util/winsock_event.h"
-# else
-# include "util/mini_event.h"
-# endif
-#else
-# ifdef HAVE_EVENT_H
-# include <event.h>
-# else
-# include "event2/event.h"
-# include "event2/event_struct.h"
-# include "event2/event_compat.h"
-# endif
-#endif
-
#ifdef UB_ON_WINDOWS
# include "winrc/win_svc.h"
#endif
void* unbound_start_brk = 0;
#endif
-#if !defined(HAVE_EVENT_BASE_GET_METHOD) && (defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP))
-static const char* ev_backend2str(int b)
-{
- switch(b) {
- case EVBACKEND_SELECT: return "select";
- case EVBACKEND_POLL: return "poll";
- case EVBACKEND_EPOLL: return "epoll";
- case EVBACKEND_KQUEUE: return "kqueue";
- case EVBACKEND_DEVPOLL: return "devpoll";
- case EVBACKEND_PORT: return "evport";
- }
- return "unknown";
-}
-#endif
-
-/** get the event system in use */
-static void get_event_sys(const char** n, const char** s, const char** m)
-{
-#ifdef USE_WINSOCK
- *n = "event";
- *s = "winsock";
- *m = "WSAWaitForMultipleEvents";
-#elif defined(USE_MINI_EVENT)
- *n = "mini-event";
- *s = "internal";
- *m = "select";
-#else
- struct event_base* b;
- *s = event_get_version();
-# ifdef HAVE_EVENT_BASE_GET_METHOD
- *n = "libevent";
- b = event_base_new();
- *m = event_base_get_method(b);
-# elif defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
- *n = "libev";
- b = (struct event_base*)ev_default_loop(EVFLAG_AUTO);
- *m = ev_backend2str(ev_backend((struct ev_loop*)b));
-# else
- *n = "unknown";
- *m = "not obtainable";
- b = NULL;
-# endif
-# ifdef HAVE_EVENT_BASE_FREE
- event_base_free(b);
-# endif
-#endif
-}
-
/** print usage. */
static void usage()
{
printf(" service - used to start from services control panel\n");
#endif
printf("Version %s\n", PACKAGE_VERSION);
- get_event_sys(&evnm, &evsys, &evmethod);
+ ub_get_event_sys(NULL, &evnm, &evsys, &evmethod);
printf("linked libs: %s %s (it uses %s), %s\n",
evnm, evsys, evmethod,
#ifdef HAVE_SSL
struct rlimit rlim;
if(total > 1024 &&
- strncmp(event_get_version(), "mini-event", 10) == 0) {
+ strncmp(ub_event_get_version(), "mini-event", 10) == 0) {
log_warn("too many file descriptors requested. The builtin"
"mini-event cannot handle more than 1024. Config "
"for less fds or compile with libevent");
total = 1024;
}
if(perthread > 64 &&
- strncmp(event_get_version(), "winsock-event", 13) == 0) {
+ strncmp(ub_event_get_version(), "winsock-event", 13) == 0) {
log_err("too many file descriptors requested. The winsock"
" event handler cannot handle more than 64 per "
" thread. Config for less fds");
qinfo->qname_len, qinfo->qtype, qinfo->qclass,
worker->scratchpad, &msg, timenow);
if(!dp) { /* no delegation, need to reprime */
- regional_free_all(worker->scratchpad);
return 0;
}
if(must_validate) {
case sec_status_unchecked:
/* some rrsets have not been verified yet, go and
* let validator do that */
- regional_free_all(worker->scratchpad);
return 0;
case sec_status_bogus:
/* some rrsets are bogus, reply servfail */
edns->udp_size = EDNS_ADVERTISED_SIZE;
edns->ext_rcode = 0;
edns->bits &= EDNS_DO;
+ if(!edns_opt_inplace_reply(edns, worker->scratchpad))
+ return 0;
error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
&msg->qinfo, id, flags, edns);
- regional_free_all(worker->scratchpad);
if(worker->stats.extended) {
worker->stats.ans_bogus++;
worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL]++;
edns->udp_size = EDNS_ADVERTISED_SIZE;
edns->ext_rcode = 0;
edns->bits &= EDNS_DO;
+ if(!edns_opt_inplace_reply(edns, worker->scratchpad))
+ return 0;
msg->rep->flags |= BIT_QR|BIT_RA;
if(!reply_info_answer_encode(&msg->qinfo, msg->rep, id, flags,
repinfo->c->buffer, 0, 1, worker->scratchpad,
error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
&msg->qinfo, id, flags, edns);
}
- regional_free_all(worker->scratchpad);
if(worker->stats.extended) {
if(secure) worker->stats.ans_secure++;
server_stats_insrcode(&worker->stats, repinfo->c->buffer);
bail_out:
rrset_array_unlock_touch(worker->env.rrset_cache,
worker->scratchpad, rep->ref, rep->rrset_count);
- regional_free_all(worker->scratchpad);
return 0;
}
}
edns->udp_size = EDNS_ADVERTISED_SIZE;
edns->ext_rcode = 0;
edns->bits &= EDNS_DO;
+ if(!edns_opt_inplace_reply(edns, worker->scratchpad))
+ return 0;
error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
qinfo, id, flags, edns);
rrset_array_unlock_touch(worker->env.rrset_cache,
worker->scratchpad, rep->ref, rep->rrset_count);
- regional_free_all(worker->scratchpad);
if(worker->stats.extended) {
worker->stats.ans_bogus ++;
worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL] ++;
edns->udp_size = EDNS_ADVERTISED_SIZE;
edns->ext_rcode = 0;
edns->bits &= EDNS_DO;
+ if(!edns_opt_inplace_reply(edns, worker->scratchpad))
+ return 0;
if(!reply_info_answer_encode(qinfo, rep, id, flags,
repinfo->c->buffer, timenow, 1, worker->scratchpad,
udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
* is bad while holding locks. */
rrset_array_unlock_touch(worker->env.rrset_cache, worker->scratchpad,
rep->ref, rep->rrset_count);
- regional_free_all(worker->scratchpad);
if(worker->stats.extended) {
if(secure) worker->stats.ans_secure++;
server_stats_insrcode(&worker->stats, repinfo->c->buffer);
* @param pkt: buffer
* @param str: string to put into text record (<255).
* @param edns: edns reply information.
+ * @param worker: worker with scratch region.
*/
static void
-chaos_replystr(sldns_buffer* pkt, const char* str, struct edns_data* edns)
+chaos_replystr(sldns_buffer* pkt, const char* str, struct edns_data* edns,
+ struct worker* worker)
{
size_t len = strlen(str);
unsigned int rd = LDNS_RD_WIRE(sldns_buffer_begin(pkt));
edns->edns_version = EDNS_ADVERTISED_VERSION;
edns->udp_size = EDNS_ADVERTISED_SIZE;
edns->bits &= EDNS_DO;
+ if(!edns_opt_inplace_reply(edns, worker->scratchpad))
+ edns->opt_list = NULL;
attach_edns_record(pkt, edns);
}
char buf[MAXHOSTNAMELEN+1];
if (gethostname(buf, MAXHOSTNAMELEN) == 0) {
buf[MAXHOSTNAMELEN] = 0;
- chaos_replystr(pkt, buf, edns);
+ chaos_replystr(pkt, buf, edns, w);
} else {
log_err("gethostname: %s", strerror(errno));
- chaos_replystr(pkt, "no hostname", edns);
+ chaos_replystr(pkt, "no hostname", edns, w);
}
}
- else chaos_replystr(pkt, cfg->identity, edns);
+ else chaos_replystr(pkt, cfg->identity, edns, w);
return 1;
}
if(query_dname_compare(qinfo->qname,
if(cfg->hide_version)
return 0;
if(cfg->version==NULL || cfg->version[0]==0)
- chaos_replystr(pkt, PACKAGE_STRING, edns);
- else chaos_replystr(pkt, cfg->version, edns);
+ chaos_replystr(pkt, PACKAGE_STRING, edns, w);
+ else chaos_replystr(pkt, cfg->version, edns, w);
return 1;
}
return 0;
}
goto send_reply;
}
- if((ret=parse_edns_from_pkt(c->buffer, &edns)) != 0) {
+ if((ret=parse_edns_from_pkt(c->buffer, &edns, worker->scratchpad)) != 0) {
struct edns_data reply_edns;
verbose(VERB_ALGO, "worker parse edns: formerror.");
log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
error_encode(c->buffer, ret, &qinfo,
*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
sldns_buffer_read_u16_at(c->buffer, 2), &reply_edns);
+ regional_free_all(worker->scratchpad);
server_stats_insrcode(&worker->stats, c->buffer);
goto send_reply;
}
edns.edns_version = EDNS_ADVERTISED_VERSION;
edns.udp_size = EDNS_ADVERTISED_SIZE;
edns.bits &= EDNS_DO;
+ edns.opt_list = NULL;
verbose(VERB_ALGO, "query with bad edns version.");
log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
error_encode(c->buffer, EDNS_RCODE_BADVERS&0xf, &qinfo,
*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
sldns_buffer_read_u16_at(c->buffer, 2), NULL);
attach_edns_record(c->buffer, &edns);
+ regional_free_all(worker->scratchpad);
goto send_reply;
}
if(edns.edns_present && edns.udp_size < NORMAL_UDP_SIZE &&
sldns_buffer_write_at(c->buffer, 4,
(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
sldns_buffer_flip(c->buffer);
+ regional_free_all(worker->scratchpad);
goto send_reply;
}
if(worker->stats.extended)
if(qinfo.qclass == LDNS_RR_CLASS_CH && answer_chaos(worker, &qinfo,
&edns, c->buffer)) {
server_stats_insrcode(&worker->stats, c->buffer);
+ regional_free_all(worker->scratchpad);
goto send_reply;
}
if(local_zones_answer(worker->daemon->local_zones, &qinfo, &edns,
* might need to bail out based on ACLs now. */
if((ret=deny_refuse_non_local(c, acl, worker, repinfo)) != -1)
{
+ regional_free_all(worker->scratchpad);
if(ret == 1)
goto send_reply;
return ret;
LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
LDNS_RCODE_REFUSED);
sldns_buffer_flip(c->buffer);
+ regional_free_all(worker->scratchpad);
server_stats_insrcode(&worker->stats, c->buffer);
log_addr(VERB_ALGO, "refused nonrec (cache snoop) query from",
&repinfo->addr, repinfo->addrlen);
sldns_buffer_read_u16_at(c->buffer, 2),
repinfo, leeway);
rc = 0;
+ regional_free_all(worker->scratchpad);
goto send_reply_rc;
}
lock_rw_unlock(&e->lock);
+ regional_free_all(worker->scratchpad);
goto send_reply;
}
verbose(VERB_ALGO, "answer from the cache failed");
*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
sldns_buffer_read_u16_at(c->buffer, 2), repinfo,
&edns)) {
+ regional_free_all(worker->scratchpad);
goto send_reply;
}
verbose(VERB_ALGO, "answer norec from cache -- "
mesh_new_client(worker->env.mesh, &qinfo,
sldns_buffer_read_u16_at(c->buffer, 2),
&edns, repinfo, *(uint16_t*)(void *)sldns_buffer_begin(c->buffer));
+ regional_free_all(worker->scratchpad);
worker_mem_report(worker, NULL);
return 0;
struct outbound_entry*
worker_send_query(uint8_t* qname, size_t qnamelen, uint16_t qtype,
uint16_t qclass, uint16_t flags, int dnssec, int want_dnssec,
- int nocaps, struct sockaddr_storage* addr, socklen_t addrlen,
- uint8_t* zone, size_t zonelen, struct module_qstate* q)
+ int nocaps, struct edns_option* opt_list,
+ struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
+ size_t zonelen, struct module_qstate* q)
{
struct worker* worker = q->env->worker;
struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
e->qstate = q;
e->qsent = outnet_serviced_query(worker->back, qname,
qnamelen, qtype, qclass, flags, dnssec, want_dnssec, nocaps,
- q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
- addrlen, zone, zonelen, worker_handle_service_reply, e,
+ q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, opt_list,
+ addr, addrlen, zone, zonelen, worker_handle_service_reply, e,
worker->back->udp_buff);
if(!e->qsent) {
return NULL;
size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
- int ATTR_UNUSED(nocaps), struct sockaddr_storage* ATTR_UNUSED(addr),
+ int ATTR_UNUSED(nocaps), struct edns_option* ATTR_UNUSED(opt_list),
+ struct sockaddr_storage* ATTR_UNUSED(addr),
socklen_t ATTR_UNUSED(addrlen), uint8_t* ATTR_UNUSED(zone),
size_t ATTR_UNUSED(zonelen), struct module_qstate* ATTR_UNUSED(q))
{
* - An internal query.
* - A query for a record type other than AAAA.
* - CD FLAG was set on querier
- * - An AAAA query for which an error was returned.
+ * - An AAAA query for which an error was returned (qstate.return_rcode):
+ *   errors are treated as SERVFAIL, so an answer is synthesized instead
+ *   (sections 5.1.2 and 5.1.3 of RFC 6147).
* - A successful AAAA query with an answer.
*/
if ( (enum dns64_qstate)qstate->minfo[id] == DNS64_INTERNAL_QUERY
|| qstate->qinfo.qtype != LDNS_RR_TYPE_AAAA
|| (qstate->query_flags & BIT_CD)
- || qstate->return_rcode != LDNS_RCODE_NOERROR
|| (qstate->return_msg &&
qstate->return_msg->rep &&
reply_find_answer_rrset(&qstate->qinfo,
return;
dt_msg_init(env, &dm, DNSTAP__MESSAGE__TYPE__FORWARDER_RESPONSE);
} else {
- if (!env->log_resolver_query_messages)
+ if (!env->log_resolver_response_messages)
return;
dt_msg_init(env, &dm, DNSTAP__MESSAGE__TYPE__RESOLVER_RESPONSE);
}
+2 June 2016: Wouter
+	- Fix libunbound for edns optlist feature.
+ - Fix distinction between free and CRYPTO_free in dsa and ecdsa alloc.
+ - Fix #752: retry resource temporarily unavailable on control pipe.
+ - un-document localzone tags.
+
+31 May 2016: Wouter
+	- Fix windows service to be created to run with limited rights, as a
+ network service account, from Mario Turschmann.
+ - compat strsep implementation.
+ - generic edns option parse and store code.
+	- and also generic edns options for upstream messages (and replies).
+	  After parsing, use edns_opt_find(edns.opt_list, LDNS_EDNS_NSID);
+	  to insert an option, use edns_opt_append(edns, region, code, len,
+	  bindata) on the opt_list passed to send_query, or in
+	  edns_opt_inplace_reply (a short sketch follows this entry).
+
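(Editorial aside, not part of the patch.) A minimal sketch of the calls named
in the entry above; the call forms follow the changelog text, while the exact
signatures, return values, struct member names and include paths are
assumptions:

	#include "util/data/msgreply.h"	/* struct edns_data, struct edns_option (assumed) */
	#include "util/regional.h"	/* struct regional (assumed) */

	static void edns_opt_example(struct edns_data* edns, struct regional* region)
	{
		/* after parse_edns_from_pkt(), look up an option by its code */
		struct edns_option* nsid = edns_opt_find(edns->opt_list, LDNS_EDNS_NSID);
		if(nsid) {
			/* nsid->opt_len and nsid->opt_data would hold the
			 * payload (assumed member names) */
		}
		/* append an option so it goes out on the opt_list passed to
		 * send_query, or on a reply via edns_opt_inplace_reply();
		 * option code 65001 is from the experimental range */
		(void)edns_opt_append(edns, region, 65001, 1, (uint8_t*)"x");
	}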
+30 May 2016: Wouter
+ - Fix time in case answer comes from cache in ub_resolve_event().
+ - Attempted fix for #765: _unboundmodule missing for python3.
+
+27 May 2016: Wouter
+ - Fix #770: Small subgroup attack on DH used in unix pipe on localhost
+ if unbound control uses a unix local named pipe.
+ - Document write permission to directory of trust anchor needed.
+ - Fix #768: Unbound Service Sometimes Can Not Shutdown
+ Completely, WER Report Shown Up. Close handle before closing WSA.
+
+26 May 2016: Wouter
+ - Updated patch from Charles Walker.
+
+24 May 2016: Wouter
+ - disable-dnssec-lame-check config option from Charles Walker.
+ - remove memory leak from lame-check patch.
+ - iana portlist update.
+
+23 May 2016: Wouter
+ - Fix #767: Reference to an expired Internet-Draft in
+ harden-below-nxdomain documentation.
+
+20 May 2016: Ralph
+ - No QNAME minimisation fall-back for NXDOMAIN answers from DNSSEC
+ signed zones.
+ - iana portlist update.
+
+19 May 2016: Wouter
+ - Fix #766: dns64 should synthesize results on timeout/errors.
+
+18 May 2016: Wouter
+ - Fix #761: DNSSEC LAME false positive resolving nic.club.
+
+17 May 2016: Wouter
+ - trunk updated with output of flex 2.6.0.
+
+6 May 2016: Wouter
+ - Fix memory leak in out-of-memory conditions of local zone add.
+
+29 April 2016: Wouter
+ - Fix sldns with static checking fixes copied from getdns.
+
+28 April 2016: Wouter
+ - Fix #759: 0x20 capsforid no longer checks type PTR, for
+ compatibility with cisco dns guard. This lowers false positives.
+
+18 April 2016: Wouter
+	- Fix so that some malformed responses to edns queries fall back to nonedns.
+
+15 April 2016: Wouter
+ - cachedb module event handling design.
+
+14 April 2016: Wouter
+ - cachedb module framework (empty).
+ - iana portlist update.
+
+12 April 2016: Wouter
+ - Fix #753: document dump_requestlist is for first thread.
+
+24 March 2016: Wouter
+ - Document permit-small-holddown for 5011 debug.
+	- Fix #749: unbound-checkconf gets SIGSEGV when used against a
+	  malformed conf file.
+
+23 March 2016: Wouter
+ - OpenSSL 1.1.0 portability, --disable-dsa configure option.
+
+21 March 2016: Wouter
+ - Fix compile of getentropy_linux for SLES11 servicepack 4.
+ - Fix dnstap-log-resolver-response-messages, from Nikolay Edigaryev.
+ - Fix test for openssl to use HMAC_Update for 1.1.0.
+ - acx_nlnetlabs.m4 to v33, with HMAC_Update.
+ - acx_nlnetlabs.m4 to v34, with -ldl -pthread test for libcrypto.
+ - ERR_remove_state deprecated since openssl 1.0.0.
+ - OPENSSL_config is deprecated, removing.
+
+18 March 2016: Ralph
+ - Validate QNAME minimised NXDOMAIN responses.
+ - If QNAME minimisation is enabled, do cache lookup for QTYPE NS in
+ harden-below-nxdomain.
+
+17 March 2016: Ralph
+ - Limit number of QNAME minimisation iterations.
+
+17 March 2016: Wouter
+ - Fix #746: Fix unbound sets CD bit on all forwards.
+ If no trust anchors, it'll not set CD bit when forwarding to another
+ server. If a trust anchor, no CD bit on the first attempt to a
+ forwarder, but CD bit thereafter on repeated attempts to get DNSSEC.
+ - iana portlist update.
+
+16 March 2016: Wouter
+ - Fix ip-transparent for ipv6 on FreeBSD, thanks to Nick Hibma.
+ - Fix ip-transparent for tcp on freebsd.
+
+15 March 2016: Wouter
+ - ip_freebind: yesno option in unbound.conf sets IP_FREEBIND for
+ binding to an IP address while the interface or address is down.
+
+14 March 2016: Wouter
+ - Fix warnings in ifdef corner case, older or unknown libevent.
+ - Fix compile for ub_event code with older libev.
+
+11 March 2016: Wouter
+ - Remove warning about unused parameter in event_pluggable.c.
+ - Fix libev usage of dispatch return value.
+ - No side effects in tolower() call, in case it is a macro.
+ - For test put free in pluggable api in parenthesis.
+
+10 March 2016: Wouter
+ - Fixup backend2str for libev.
+
+09 March 2016: Willem
+ - User defined pluggable event API for libunbound
+ - Fixup of compile fix for pluggable event API from P.Y. Adi
+ Prasaja.
+
+09 March 2016: Wouter
+ - Updated configure and ltmain.sh.
+ - Updated L root IPv6 address.
+
+07 March 2016: Wouter
+ - Fix #747: assert in outnet_serviced_query_stop.
+ - iana ports fetched via https.
+ - iana portlist update.
+
+03 March 2016: Wouter
+ - configure tests for the weak attribute support by the compiler.
+
+02 March 2016: Wouter
+ - 1.5.8 release tag
+ - trunk contains 1.5.9 in development.
+ - iana portlist update.
+ - Fix #745: unbound.py - idn2dname throws UnicodeError when idnname
+ contains trailing dot.
+
24 February 2016: Wouter
- Fix OpenBSD asynclook lock free that gets used later (fix test code).
- Fix that NSEC3 negative cache is used when there is no salt.
-README for Unbound 1.5.8
+README for Unbound 1.5.9
Copyright 2007 NLnet Labs
http://unbound.net
#
# Example configuration file.
#
-# See unbound.conf(5) man page, version 1.5.8.
+# See unbound.conf(5) man page, version 1.5.9.
#
# this is a comment.
# (uses IP_BINDANY on FreeBSD).
# ip-transparent: no
+ # use IP_FREEBIND so the interface: addresses can be non-local
+ # and you can bind to nonexisting IPs and interfaces that are down.
+ # Linux only. On Linux you also have ip-transparent that is similar.
+ # ip-freebind: no
+
# EDNS reassembly buffer to advertise to UDP peers (the actual buffer
# is set with msg-buffer-size). 1480 can solve fragmentation (timeouts).
# edns-buffer-size: 4096
# into response messages when those sections are not required.
# minimal-responses: no
+ # true to disable DNSSEC lameness check in iterator.
+ # disable-dnssec-lame-check: no
+
# module configuration of the server. A string with identifiers
# separated by spaces. Syntax: "[dns64] [validator] iterator"
# module-config: "validator iterator"
# If the value 0 is given, missing anchors are not removed.
# keep-missing: 31622400 # 366 days
- # debug option that allows very small holddown times for key rollover
+ # debug option that allows very small holddown times for key rollover,
+ # otherwise the RFC mandates probe intervals must be at least 1 hour.
# permit-small-holddown: no
# the amount of memory to use for the key cache.
-.TH "libunbound" "3" "Mar 2, 2016" "NLnet Labs" "unbound 1.5.8"
+.TH "libunbound" "3" "Jun 9, 2016" "NLnet Labs" "unbound 1.5.9"
.\"
.\" libunbound.3 -- unbound library functions manual
.\"
.B ub_ctx_zone_remove,
.B ub_ctx_data_add,
.B ub_ctx_data_remove
-\- Unbound DNS validating resolver 1.5.8 functions.
+\- Unbound DNS validating resolver 1.5.9 functions.
.SH "SYNOPSIS"
.B #include <unbound.h>
.LP
-.TH "unbound-anchor" "8" "Mar 2, 2016" "NLnet Labs" "unbound 1.5.8"
+.TH "unbound-anchor" "8" "Jun 9, 2016" "NLnet Labs" "unbound 1.5.9"
.\"
.\" unbound-anchor.8 -- unbound anchor maintenance utility manual
.\"
-.TH "unbound-checkconf" "8" "Mar 2, 2016" "NLnet Labs" "unbound 1.5.8"
+.TH "unbound-checkconf" "8" "Jun 9, 2016" "NLnet Labs" "unbound 1.5.9"
.\"
.\" unbound-checkconf.8 -- unbound configuration checker manual
.\"
-.TH "unbound-control" "8" "Mar 2, 2016" "NLnet Labs" "unbound 1.5.8"
+.TH "unbound-control" "8" "Jun 9, 2016" "NLnet Labs" "unbound 1.5.9"
.\"
.\" unbound-control.8 -- unbound remote control manual
.\"
-.TH "unbound\-host" "1" "Mar 2, 2016" "NLnet Labs" "unbound 1.5.8"
+.TH "unbound\-host" "1" "Jun 9, 2016" "NLnet Labs" "unbound 1.5.9"
.\"
.\" unbound-host.1 -- unbound DNS lookup utility
.\"
-.TH "unbound" "8" "Mar 2, 2016" "NLnet Labs" "unbound 1.5.8"
+.TH "unbound" "8" "Jun 9, 2016" "NLnet Labs" "unbound 1.5.9"
.\"
.\" unbound.8 -- unbound manual
.\"
.\"
.SH "NAME"
.B unbound
-\- Unbound DNS validating resolver 1.5.8.
+\- Unbound DNS validating resolver 1.5.9.
.SH "SYNOPSIS"
.B unbound
.RB [ \-h ]
-.TH "unbound.conf" "5" "Mar 2, 2016" "NLnet Labs" "unbound 1.5.8"
+.TH "unbound.conf" "5" "Jun 9, 2016" "NLnet Labs" "unbound 1.5.9"
.\"
.\" unbound.conf.5 -- unbound.conf manual
.\"
provides service on. This option needs unbound to be started with root
permissions on some systems. The option uses IP_BINDANY on FreeBSD systems.
.TP
+.B ip\-freebind: \fI<yes or no>
+If yes, then use the IP_FREEBIND socket option on sockets where unbound
+is listening for incoming traffic. Default is no. Allows you to bind to
+IP addresses that are nonlocal or do not exist, like when the network
+interface or IP address is down. Exists only on Linux, where the similar
+ip\-transparent option is also available.
+.TP
.B rrset\-cache\-size: \fI<number>
Number of bytes size of the RRset cache. Default is 4 megabytes.
A plain number is in bytes, append 'k', 'm' or 'g' for kilobytes, megabytes
IP address lookups), and thus may be incompatible with this. To try to avoid
this only DNSSEC-secure nxdomains are used, because the old software does not
have DNSSEC. Default is off.
+Currently, draft\-ietf\-dnsop\-nxdomain\-cut promotes this technique.
.TP
.B harden\-referral\-path: \fI<yes or no>
Harden the referral path by performing additional queries for
protocol RFCs mandate these sections, and the additional content could
be of use and save roundtrips for clients.
.TP
+.B disable\-dnssec\-lame\-check: \fI<yes or no>
+If true, disables the DNSSEC lameness check in the iterator. This check
+sees if RRSIGs are present in the answer when dnssec is expected, and
+retries another authority if RRSIGs are unexpectedly missing.
+The validator will insist on RRSIGs for DNSSEC signed domains regardless
+of this setting, if a trust anchor is loaded.
+.TP
.B module\-config: \fI<"module names">
Module configuration, a list of module names separated by spaces, surround
the string with quotes (""). The modules can be validator, iterator.
The probes are several times per month, thus the machine must be online
frequently. The initial file can be one with contents as described in
\fBtrust\-anchor\-file\fR. The file is written to when the anchor is updated,
-so the unbound user must have write permission.
+so the unbound user must have write permission, both to the file and to the
+directory it is in (a temporary file is created there, which is necessary to
+deal with a full filesystem).
.TP
.B trust\-anchor: \fI<"Resource Record">
A DS or DNSKEY RR for a key to use for validation. Multiple entries can be
uint8_t* dname;
size_t dname_len;
for(s = cfg->stubs; s; s = s->next) {
+ if(!s->name) continue;
dname = sldns_str2wire_dname(s->name, &dname_len);
if(!dname) {
log_err("cannot parse stub name '%s'", s->name);
if(!ah(dp, "I.ROOT-SERVERS.NET.", "2001:7fe::53")) goto failed;
if(!ah(dp, "J.ROOT-SERVERS.NET.", "2001:503:c27::2:30")) goto failed;
if(!ah(dp, "K.ROOT-SERVERS.NET.", "2001:7fd::1")) goto failed;
- if(!ah(dp, "L.ROOT-SERVERS.NET.", "2001:500:3::42")) goto failed;
+ if(!ah(dp, "L.ROOT-SERVERS.NET.", "2001:500:9f::42")) goto failed;
if(!ah(dp, "M.ROOT-SERVERS.NET.", "2001:dc3::35")) goto failed;
}
return dp;
return 1;
}
+int
+iter_indicates_dnssec_fwd(struct module_env* env, struct query_info *qinfo)
+{
+ struct trust_anchor* a;
+ if(!env || !env->anchors || !qinfo || !qinfo->qname)
+ return 0;
+ /* a trust anchor exists above the name? */
+ if((a=anchors_lookup(env->anchors, qinfo->qname, qinfo->qname_len,
+ qinfo->qclass))) {
+ if(a->numDS == 0 && a->numDNSKEY == 0) {
+ /* insecure trust point */
+ lock_basic_unlock(&a->lock);
+ return 0;
+ }
+ lock_basic_unlock(&a->lock);
+ return 1;
+ }
+ /* no trust anchor above it. */
+ return 0;
+}
+
int
iter_indicates_dnssec(struct module_env* env, struct delegpt* dp,
struct dns_msg* msg, uint16_t dclass)
int iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
struct delegpt* dp);
+/**
+ * See if qname has DNSSEC needs in the forwarding case. This is true if
+ * there is a trust anchor above it. Whether there is an insecure delegation
+ * to the data is unknown, but CD-retry is needed.
+ * @param env: environment with anchors.
+ * @param qinfo: query name and class.
+ * @return true if trust anchor above qname, false if no anchor or insecure
+ * point above qname.
+ */
+int iter_indicates_dnssec_fwd(struct module_env* env,
+ struct query_info *qinfo);
+
/**
* See if delegation is expected to have DNSSEC information (RRSIGs) in
* its answers, or not. Inspects delegation point (name), trust anchors,
log_err("iterator: could not apply configuration settings.");
return 0;
}
- if(env->cfg->qname_minimisation) {
- uint8_t dname[LDNS_MAX_DOMAINLEN+1];
- size_t len = sizeof(dname);
- if(sldns_str2wire_dname_buf("ip6.arpa.", dname, &len) != 0) {
- log_err("ip6.arpa. parse error");
- return 0;
- }
- iter_env->ip6arpa_dname = (uint8_t*)malloc(len);
- if(!iter_env->ip6arpa_dname) {
- log_err("malloc failure");
- return 0;
- }
- memcpy(iter_env->ip6arpa_dname, dname, len);
- }
return 1;
}
if(!env || !env->modinfo[id])
return;
iter_env = (struct iter_env*)env->modinfo[id];
- free(iter_env->ip6arpa_dname);
free(iter_env->target_fetch_policy);
priv_delete(iter_env->priv);
donotq_delete(iter_env->donotq);
/* Start with the (current) qname. */
iq->qchase = qstate->qinfo;
outbound_list_init(&iq->outlist);
+ iq->minimise_count = 0;
if (qstate->env->cfg->qname_minimisation)
iq->minimisation_state = INIT_MINIMISE_STATE;
else
int tf_policy;
struct delegpt_addr* target;
struct outbound_entry* outq;
+ /* EDNS options to set on outgoing packet */
+ struct edns_option* opt_list = NULL;
/* NOTE: a request will encounter this state for each target it
* needs to send a query to. That is, at least one per referral,
}
if(iq->minimisation_state == INIT_MINIMISE_STATE) {
- /* (Re)set qinfo_out to (new) delegation point, except
- * when qinfo_out is already a subdomain of dp. This happens
- * when resolving ip6.arpa dnames. */
+ /* (Re)set qinfo_out to (new) delegation point, except when
+ * qinfo_out is already a subdomain of dp. This happens when
+ * increasing by more than one label at once (QNAMEs with more
+ * than MAX_MINIMISE_COUNT labels). */
if(!(iq->qinfo_out.qname_len
&& dname_subdomain_c(iq->qchase.qname,
iq->qinfo_out.qname)
iq->qinfo_out.qname_len = iq->dp->namelen;
iq->qinfo_out.qtype = LDNS_RR_TYPE_NS;
iq->qinfo_out.qclass = iq->qchase.qclass;
+ iq->minimise_count = 0;
}
iq->minimisation_state = MINIMISE_STATE;
}
if(iq->minimisation_state == MINIMISE_STATE) {
- int labdiff = dname_count_labels(iq->qchase.qname) -
+ int qchaselabs = dname_count_labels(iq->qchase.qname);
+ int labdiff = qchaselabs -
dname_count_labels(iq->qinfo_out.qname);
iq->qinfo_out.qname = iq->qchase.qname;
iq->qinfo_out.qname_len = iq->qchase.qname_len;
+ iq->minimise_count++;
- /* Special treatment for ip6.arpa lookups.
- * Reverse IPv6 dname has 34 labels, increment the IP part
- * (usually first 32 labels) by 8 labels (7 more than the
- * default 1 label increment). */
- if(labdiff <= 32 &&
- dname_subdomain_c(iq->qchase.qname, ie->ip6arpa_dname)) {
- labdiff -= 7;
- /* Small chance of zone cut after first label. Stop
- * minimising */
- if(labdiff <= 1)
- labdiff = 0;
+ /* Limit number of iterations for QNAMEs with more
+ * than MAX_MINIMISE_COUNT labels. Send first MINIMISE_ONE_LAB
+ * labels of QNAME always individually.
+ */
+ if(qchaselabs > MAX_MINIMISE_COUNT && labdiff > 1 &&
+ iq->minimise_count > MINIMISE_ONE_LAB) {
+ if(iq->minimise_count < MAX_MINIMISE_COUNT) {
+ int multilabs = qchaselabs - 1 -
+ MINIMISE_ONE_LAB;
+ int extralabs = multilabs /
+ MINIMISE_MULTIPLE_LABS;
+
+ if (MAX_MINIMISE_COUNT - iq->minimise_count >=
+ multilabs % MINIMISE_MULTIPLE_LABS)
+ /* Default behaviour is to add 1 label
+ * every iteration. Therefore, decrement
+ * the extralabs by 1 */
+ extralabs--;
+ if (extralabs < labdiff)
+ labdiff -= extralabs;
+ else
+ labdiff = 1;
+ }
+ /* Last minimised iteration, send all labels with
+ * QTYPE=NS */
+ else
+ labdiff = 1;
}
if(labdiff > 1) {
* cached as NOERROR/NODATA */
return 1;
}
-
}
if(iq->minimisation_state == SKIP_MINIMISE_STATE)
/* Do not increment qname, continue incrementing next
outq = (*qstate->env->send_query)(
iq->qinfo_out.qname, iq->qinfo_out.qname_len,
iq->qinfo_out.qtype, iq->qinfo_out.qclass,
- iq->chase_flags | (iq->chase_to_rd?BIT_RD:0), EDNS_DO|BIT_CD,
+ iq->chase_flags | (iq->chase_to_rd?BIT_RD:0),
+ /* unset CD if to forwarder(RD set) and not dnssec retry
+ * (blacklist nonempty) and no trust-anchors are configured
+ * above the qname or on the first attempt when dnssec is on */
+ EDNS_DO| ((iq->chase_to_rd||(iq->chase_flags&BIT_RD)!=0)&&
+ !qstate->blacklist&&(!iter_indicates_dnssec_fwd(qstate->env,
+ &iq->qinfo_out)||target->attempts==1)?0:BIT_CD),
iq->dnssec_expected, iq->caps_fallback || is_caps_whitelisted(
- ie, iq), &target->addr, target->addrlen, iq->dp->name,
- iq->dp->namelen, qstate);
+ ie, iq), opt_list, &target->addr, target->addrlen,
+ iq->dp->name, iq->dp->namelen, qstate);
if(!outq) {
log_addr(VERB_DETAIL, "error sending query to auth server",
&target->addr, target->addrlen);
* differently. No queries should be sent elsewhere */
type = RESPONSE_TYPE_ANSWER;
}
- if(iq->dnssec_expected && !iq->dnssec_lame_query &&
+ if(!qstate->env->cfg->disable_dnssec_lame_check && iq->dnssec_expected
+ && !iq->dnssec_lame_query &&
!(iq->chase_flags&BIT_RD)
+ && iq->sent_count < DNSSEC_LAME_DETECT_COUNT
&& type != RESPONSE_TYPE_LAME
&& type != RESPONSE_TYPE_REC_LAME
&& type != RESPONSE_TYPE_THROWAWAY
if(iq->minimisation_state != DONOT_MINIMISE_STATE) {
/* Best effort qname-minimisation.
* Stop minimising and send full query when RCODE
- * is not NOERROR */
+ * is not NOERROR. */
if(FLAGS_GET_RCODE(iq->response->rep->flags) !=
LDNS_RCODE_NOERROR)
iq->minimisation_state = DONOT_MINIMISE_STATE;
+ if(FLAGS_GET_RCODE(iq->response->rep->flags) ==
+ LDNS_RCODE_NXDOMAIN) {
+ /* Stop resolving when NXDOMAIN is DNSSEC
+			 * signed. Based on assumption that nameservers
+ * serving signed zones do not return NXDOMAIN
+ * for empty-non-terminals. */
+ if(iq->dnssec_expected)
+ return final_state(iq);
+ /* Make subrequest to validate intermediate
+ * NXDOMAIN if harden-below-nxdomain is
+ * enabled. */
+ if(qstate->env->cfg->harden_below_nxdomain) {
+ struct module_qstate* subq = NULL;
+ log_query_info(VERB_QUERY,
+ "schedule NXDOMAIN validation:",
+ &iq->response->qinfo);
+ if(!generate_sub_request(
+ iq->response->qinfo.qname,
+ iq->response->qinfo.qname_len,
+ iq->response->qinfo.qtype,
+ iq->response->qinfo.qclass,
+ qstate, id, iq,
+ INIT_REQUEST_STATE,
+ FINISHED_STATE, &subq, 1))
+ verbose(VERB_ALGO,
+ "could not validate NXDOMAIN "
+ "response");
+ }
+ }
return next_state(iq, QUERYTARGETS_STATE);
}
return final_state(iq);
goto handle_it;
}
/* edns is not examined, but removed from message to help cache */
- if(parse_extract_edns(prs, &edns) != LDNS_RCODE_NOERROR)
+ if(parse_extract_edns(prs, &edns, qstate->env->scratch) !=
+ LDNS_RCODE_NOERROR)
goto handle_it;
/* remove CD-bit, we asked for in case we handle validation ourself */
prs->flags &= ~BIT_CD;
#define MAX_REFERRAL_COUNT 130
/** max number of queries-sent-out. Make sure large NS set does not loop */
#define MAX_SENT_COUNT 32
+/** max number of queries for which to perform dnssec lameness detection
+ * (rrsigs missing detection); after that, just pick up that response */
+#define DNSSEC_LAME_DETECT_COUNT 4
+/**
+ * max number of QNAME minimisation iterations. Limits number of queries for
+ * QNAMEs with a lot of labels.
+ */
+#define MAX_MINIMISE_COUNT 10
+/**
+ * number of labels from QNAME that are always sent individually when using
+ * QNAME minimisation, even when the number of labels of the QNAME is bigger
+ * than MAX_MINIMISE_COUNT */
+#define MINIMISE_ONE_LAB 4
+#define MINIMISE_MULTIPLE_LABS (MAX_MINIMISE_COUNT - MINIMISE_ONE_LAB)
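An editorial note on how these limits interact (not part of the patch), as it
follows from the iterator code above:

	/* With the defaults, each of the first MINIMISE_ONE_LAB (4) minimised
	 * queries adds a single label. For QNAMEs with more than
	 * MAX_MINIMISE_COUNT (10) labels, the remaining labels are spread over
	 * the remaining MINIMISE_MULTIPLE_LABS (6) iterations, and the last
	 * minimised iteration sends all remaining labels with QTYPE=NS. */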
/** at what query-sent-count to stop target fetch policy */
#define TARGET_FETCH_STOP 3
/** how nice is a server without further information, in msec
/** list of pending queries to authoritative servers. */
struct outbound_list outlist;
- /** QNAME minimisation state */
+ /** QNAME minimisation state, RFC7816 */
enum minimisation_state minimisation_state;
/**
* when qname minimisation is enabled.
*/
struct query_info qinfo_out;
+
+ /**
+	 * Count number of QNAME minimisation iterations. Used to limit number of
+ * outgoing queries when QNAME minimisation is enabled.
+ */
+ int minimise_count;
};
/**
struct libworker;
struct tube;
struct sldns_buffer;
-struct event_base;
+struct ub_event_base;
/**
* The context structure
struct ub_randstate* seed_rnd;
/** event base for event oriented interface */
- struct event_base* event_base;
+ struct ub_event_base* event_base;
/** libworker for event based interface */
struct libworker* event_worker;
#include "util/random.h"
#include "util/net_help.h"
#include "util/tube.h"
+#include "util/ub_event.h"
#include "services/modstack.h"
#include "services/localzone.h"
#include "services/cache/infra.h"
return ctx;
}
+struct ub_ctx*
+ub_ctx_create_ub_event(struct ub_event_base* ueb)
+{
+ struct ub_ctx* ctx = ub_ctx_create_nopipe();
+ if(!ctx)
+ return NULL;
+ /* no pipes, but we have the locks to make sure everything works */
+ ctx->created_bg = 0;
+ ctx->dothread = 1; /* the processing is in the same process,
+ makes ub_cancel and ub_ctx_delete do the right thing */
+ ctx->event_base = ueb;
+ return ctx;
+}
+
struct ub_ctx*
ub_ctx_create_event(struct event_base* eb)
{
ctx->created_bg = 0;
ctx->dothread = 1; /* the processing is in the same process,
makes ub_cancel and ub_ctx_delete do the right thing */
- ctx->event_base = eb;
+ ctx->event_base = ub_libevent_event_base(eb);
+ if (!ctx->event_base) {
+ ub_ctx_delete(ctx);
+ return NULL;
+ }
return ctx;
}
}
}
+ /* set time in case answer comes from cache */
+ ub_comm_base_now(ctx->event_worker->base);
+
/* create new ctx_query and attempt to add to the list */
q = context_new(ctx, name, rrtype, rrclass, (ub_callback_t)callback,
mydata);
int
ub_ctx_set_event(struct ub_ctx* ctx, struct event_base* base) {
+ struct ub_event_base* new_base;
+
if (!ctx || !ctx->event_base || !base) {
return UB_INITFAIL;
}
- if (ctx->event_base == base) {
+ if (ub_libevent_get_event_base(ctx->event_base) == base) {
/* already set */
return UB_NOERROR;
}
/* destroy the current worker - safe to pass in NULL */
libworker_delete_event(ctx->event_worker);
ctx->event_worker = NULL;
- ctx->event_base = base;
+ new_base = ub_libevent_event_base(base);
+ if (new_base)
+ ctx->event_base = new_base;
ctx->created_bg = 0;
ctx->dothread = 1;
lock_basic_unlock(&ctx->cfglock);
- return UB_NOERROR;
+ return new_base ? UB_NOERROR : UB_INITFAIL;
}
/** setup fresh libworker struct */
static struct libworker*
-libworker_setup(struct ub_ctx* ctx, int is_bg, struct event_base* eb)
+libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
{
unsigned int seed;
struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
}
struct libworker* libworker_create_event(struct ub_ctx* ctx,
- struct event_base* eb)
+ struct ub_event_base* eb)
{
return libworker_setup(ctx, 0, eb);
}
edns->ext_rcode = 0;
edns->edns_version = 0;
edns->bits = EDNS_DO;
+ edns->opt_list = NULL;
if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
edns->udp_size = (uint16_t)sldns_buffer_capacity(
w->back->udp_buff);
struct outbound_entry* libworker_send_query(uint8_t* qname, size_t qnamelen,
uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
- int want_dnssec, int nocaps, struct sockaddr_storage* addr,
- socklen_t addrlen, uint8_t* zone, size_t zonelen,
- struct module_qstate* q)
+ int want_dnssec, int nocaps, struct edns_option* opt_list,
+ struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
+ size_t zonelen, struct module_qstate* q)
{
struct libworker* w = (struct libworker*)q->env->worker;
struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
e->qstate = q;
e->qsent = outnet_serviced_query(w->back, qname,
qnamelen, qtype, qclass, flags, dnssec, want_dnssec, nocaps,
- q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
- addrlen, zone, zonelen, libworker_handle_service_reply, e,
- w->back->udp_buff);
+ q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, opt_list,
+ addr, addrlen, zone, zonelen, libworker_handle_service_reply,
+ e, w->back->udp_buff);
if(!e->qsent) {
return NULL;
}
size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
- int ATTR_UNUSED(nocaps), struct sockaddr_storage* ATTR_UNUSED(addr),
+ int ATTR_UNUSED(nocaps), struct edns_option* ATTR_UNUSED(opt_list),
+ struct sockaddr_storage* ATTR_UNUSED(addr),
socklen_t ATTR_UNUSED(addrlen), uint8_t* ATTR_UNUSED(zone),
size_t ATTR_UNUSED(zonelen), struct module_qstate* ATTR_UNUSED(q))
{
struct regional;
struct tube;
struct sldns_buffer;
-struct event_base;
+struct ub_event_base;
/**
* The library-worker status structure
* @return new worker or NULL.
*/
struct libworker* libworker_create_event(struct ub_ctx* ctx,
- struct event_base* eb);
+ struct ub_event_base* eb);
/**
* Attach context_query to mesh for callback in event-driven setup.
ub_ctx_config
ub_ctx_create
ub_ctx_create_event
+ub_ctx_create_ub_event
ub_ctx_data_add
ub_ctx_data_remove
ub_ctx_debuglevel
/**
* \file
*
- * This file contains the unbound interface for use with libevent.
- * You have to use the same libevent that unbound was compiled with,
- * otherwise it wouldn't work, the event and event_base structures would
- * be different. If unbound is compiled without libevent support then
- * this header file is not supposed to be installed on the system.
+ * This file contains the unbound interface for use with user defined
+ * pluggable event bases.
*
- * Use ub_ctx_create_event_base() to create an unbound context that uses
- * the event base that you have made. Then, use the ub_resolve_event call
- * to add DNS resolve queries to the context. Those then run when you
- * call event_dispatch() on your event_base, and when they are done you
- * get a function callback.
+ * Use ub_ctx_create_ub_event() to create an unbound context that uses
+ * the user provided event base API. Then, use the ub_resolve_event call
+ * to add DNS resolve queries to the context. Those then run with the
+ * provided event base, and when they are done you get a function callback.
*
* This method does not fork another process or create a thread, the effort
- * is done by the unbound state machines that are connected to the event_base.
+ * is done by the unbound state machines that are connected to the event base.
+ *
+ * It is also possible to provide a libevent based event base by using
+ * ub_ctx_create_event(). But you have to use the same libevent that
+ * unbound was compiled with, otherwise it wouldn't work, the event and
+ * event_base structures would be different.
*/
#ifndef _UB_UNBOUND_EVENT_H
#define _UB_UNBOUND_EVENT_H
struct ub_result;
struct event_base;
+/** event timeout */
+#define UB_EV_TIMEOUT 0x01
+/** event fd readable */
+#define UB_EV_READ 0x02
+/** event fd writable */
+#define UB_EV_WRITE 0x04
+/** event signal */
+#define UB_EV_SIGNAL 0x08
+/** event must persist */
+#define UB_EV_PERSIST 0x10
+
+/** magic number to identify this version of the pluggable event api */
+#define UB_EVENT_MAGIC 0x44d74d78
+
+struct ub_event;
+struct ub_event_base;
+struct timeval;
+
+/**
+ * The Virtual Method Table for an ub_event_base "object"
+ */
+struct ub_event_base_vmt {
+ /** Destructor for the ub_event_base object,
+ * (not called by libunbound) */
+ void (*free)(struct ub_event_base*);
+ /** Run the event loop
+ * (not called by libunbound when using ub_resolve_event) */
+ int (*dispatch)(struct ub_event_base*);
+ /** Exit the given event loop */
+ int (*loopexit)(struct ub_event_base*, struct timeval*);
+ /** Instantiate a new ub_event associated with this event base */
+ struct ub_event* (*new_event)(struct ub_event_base*,
+ int fd, short bits, void (*cb)(int, short, void*), void* arg);
+ /** Instantiate a new signal associated with this event base,
+ * (not called by libunbound) */
+ struct ub_event* (*new_signal)(struct ub_event_base*, int fd,
+ void (*cb)(int, short, void*), void* arg);
+ /** Create a new ub_event associated with the given wsaevent,
+ * (not called by libunbound) */
+ struct ub_event* (*winsock_register_wsaevent)(struct ub_event_base*,
+ void* wsaevent, void (*cb)(int, short, void*), void* arg);
+};
+
+/**
+ * A user defined pluggable event base is registered by providing a
+ * ub_event_base "object" with the ub_ctx_create_ub_event() function.
+ * The magic number must be correct and the Virtual Method Table must be
+ * fully equipped providing the event base API to be used by libunbound.
+ */
+struct ub_event_base {
+ /** magic must be UB_EVENT_MAGIC (0x44d74d78) */
+ unsigned long magic;
+ /** Virtual Method Table for ub_event_base */
+ struct ub_event_base_vmt* vmt;
+};
+
+/**
+ * The Virtual Method Table for an ub_event "object"
+ */
+struct ub_event_vmt {
+ /** Add event bits for this event to fire on.
+ * The event will be deactivated before this function is called. */
+ void (*add_bits)(struct ub_event*, short);
+	/** Configure the event so it will no longer fire on the given bits.
+ * The event will be deactivated before this function is called. */
+ void (*del_bits)(struct ub_event*, short);
+ /** Change or set the file descriptor on the event
+ * The event will be deactivated before this function is called. */
+ void (*set_fd)(struct ub_event*, int);
+ /** Destructor for the ub_event object */
+ void (*free)(struct ub_event*);
+	/** Activate the event. The given timeval is a timeout value. */
+ int (*add)(struct ub_event*, struct timeval*);
+ /** Deactivate the event */
+ int (*del)(struct ub_event*);
+ /** Reconfigure and activate a timeout event */
+ int (*add_timer)(struct ub_event*, struct ub_event_base*,
+ void (*cb)(int, short, void*), void* arg, struct timeval*);
+ /** Deactivate the timeout event */
+ int (*del_timer)(struct ub_event*);
+ /** Activate a signal event (not called by libunbound). */
+ int (*add_signal)(struct ub_event*, struct timeval*);
+ /** Deactivate a signal event (not called by libunbound). */
+ int (*del_signal)(struct ub_event*);
+ /** Destructor for a ub_event associated with a wsaevent,
+ * (not called by libunbound)
+ */
+ void (*winsock_unregister_wsaevent)(struct ub_event* ev);
+ /** Libunbound will signal the eventloop when a TCP windows socket
+ * will block on next read or write (given by the eventbits), to work
+ * around edge trigger event behaviour of select on windows with TCP.
+ */
+ void (*winsock_tcp_wouldblock)(struct ub_event*, int eventbit);
+};
+
+/**
+ * An "object" comprising a user defined pluggable event.
+ * The magic number must be correct and the Virtual Method Table must be
+ * fully equipped providing the ub_event API to be used by libunbound.
+ */
+struct ub_event {
+ /** magic must be UB_EVENT_MAGIC (0x44d74d78) */
+ unsigned long magic;
+ /** Virtual Method Table for ub_event */
+ struct ub_event_vmt* vmt;
+};
+
typedef void (*ub_event_callback_t)(void*, int, void*, int, int, char*);
/**
* Create a resolving and validation context.
* The information from /etc/resolv.conf and /etc/hosts is not utilised by
* default. Use ub_ctx_resolvconf and ub_ctx_hosts to read them.
+ * @param base: the pluggable event base that the caller has created.
+ * The unbound context uses this event base.
+ * @return a new context. default initialisation.
+ * returns NULL on error.
+ * You must use ub_resolve_event with this context.
+ * Do not call ub_ctx_async, ub_poll, ub_wait, ub_process, this is all done
+ * with the event_base. Setup the options you like with the other functions.
+ */
+struct ub_ctx* ub_ctx_create_ub_event(struct ub_event_base* base);
+
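(Editorial aside, not part of the patch.) A minimal sketch of how a caller
could plug its own event base into libunbound with the structures declared
above. All my_* names are hypothetical and the function bodies are
placeholders that a real backend would wire into its own loop; only the vmt
layouts, UB_EVENT_MAGIC and ub_ctx_create_ub_event() come from this header:

	#include <stdlib.h>
	#include <unbound.h>
	#include <unbound-event.h>

	struct my_event {
		struct ub_event super;	/* magic + vmt, must come first */
		int fd; short bits;
		void (*cb)(int, short, void*); void* arg;
	};

	/* ub_event_vmt stubs; a real backend registers fd/timer interest with
	 * its own loop and calls cb(fd, bits, arg) when the event fires. */
	static void my_add_bits(struct ub_event* e, short b) { ((struct my_event*)e)->bits |= b; }
	static void my_del_bits(struct ub_event* e, short b) { ((struct my_event*)e)->bits &= ~b; }
	static void my_set_fd(struct ub_event* e, int fd) { ((struct my_event*)e)->fd = fd; }
	static void my_ev_free(struct ub_event* e) { free(e); }
	static int my_add(struct ub_event* e, struct timeval* tv) { (void)e; (void)tv; return 0; }
	static int my_del(struct ub_event* e) { (void)e; return 0; }
	static int my_add_timer(struct ub_event* e, struct ub_event_base* b,
		void (*cb)(int, short, void*), void* arg, struct timeval* tv)
	{ (void)b; (void)tv; ((struct my_event*)e)->cb = cb;
	  ((struct my_event*)e)->arg = arg; return 0; }
	static int my_del_timer(struct ub_event* e) { (void)e; return 0; }
	static int my_add_signal(struct ub_event* e, struct timeval* tv) { (void)e; (void)tv; return 0; }
	static int my_del_signal(struct ub_event* e) { (void)e; return 0; }
	static void my_unregister_wsaevent(struct ub_event* e) { (void)e; }
	static void my_tcp_wouldblock(struct ub_event* e, int bit) { (void)e; (void)bit; }
	static struct ub_event_vmt my_event_vmt = {
		my_add_bits, my_del_bits, my_set_fd, my_ev_free, my_add, my_del,
		my_add_timer, my_del_timer, my_add_signal, my_del_signal,
		my_unregister_wsaevent, my_tcp_wouldblock };

	/* ub_event_base_vmt stubs */
	static void my_base_free(struct ub_event_base* b) { (void)b; }
	static int my_dispatch(struct ub_event_base* b) { (void)b; return 0; /* run real loop */ }
	static int my_loopexit(struct ub_event_base* b, struct timeval* tv) { (void)b; (void)tv; return 0; }
	static struct ub_event* my_new_event(struct ub_event_base* b, int fd,
		short bits, void (*cb)(int, short, void*), void* arg)
	{
		struct my_event* e = (struct my_event*)calloc(1, sizeof(*e));
		(void)b;
		if(!e) return NULL;
		e->super.magic = UB_EVENT_MAGIC;
		e->super.vmt = &my_event_vmt;
		e->fd = fd; e->bits = bits; e->cb = cb; e->arg = arg;
		return &e->super;
	}
	static struct ub_event* my_new_signal(struct ub_event_base* b, int fd,
		void (*cb)(int, short, void*), void* arg)
	{ return my_new_event(b, fd, UB_EV_SIGNAL, cb, arg); }
	static struct ub_event* my_register_wsaevent(struct ub_event_base* b,
		void* w, void (*cb)(int, short, void*), void* arg)
	{ (void)w; return my_new_event(b, -1, 0, cb, arg); }
	static struct ub_event_base_vmt my_base_vmt = {
		my_base_free, my_dispatch, my_loopexit,
		my_new_event, my_new_signal, my_register_wsaevent };

	int main(void)
	{
		struct ub_event_base base = { UB_EVENT_MAGIC, &my_base_vmt };
		struct ub_ctx* ctx = ub_ctx_create_ub_event(&base);
		if(!ctx) return 1;
		/* queries would now be added with ub_resolve_event(); they fire
		 * when the backend loop invokes the stored callbacks. */
		ub_ctx_delete(ctx);
		return 0;
	}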
+/**
+ * Create a resolving and validation context.
+ * The information from /etc/resolv.conf and /etc/hosts is not utilised by
+ * default. Use ub_ctx_resolvconf and ub_ctx_hosts to read them.
+ * You have to use the same libevent that unbound was compiled with,
+ * otherwise it wouldn't work, the event and event_base structures would
+ * be different.
* @param base: the event base that the caller has created. The unbound
* context uses this event base.
* @return a new context. default initialisation.
struct ub_ctx* ub_ctx_create_event(struct event_base* base);
/**
- * Set a new event_base on a context created with ub_ctx_create_event.
+ * Set a new libevent event_base on a context created with ub_ctx_create_event.
+ * You have to use the same libevent that unbound was compiled with,
+ * otherwise it wouldn't work, the event and event_base structures would
+ * be different.
* Any outbound queries will be canceled.
* @param ctx the ub_ctx to update. Must have been created with ub_ctx_create_event
* @param base the new event_base to attach to the ctx
struct comm_point;
struct module_qstate;
struct tube;
+struct edns_option;
/**
* Worker service routine to send serviced queries to authoritative servers.
* @param dnssec: if set, EDNS record will have DO bit set.
* @param want_dnssec: signatures needed.
* @param nocaps: ignore capsforid(if in config), do not perturb qname.
+ * @param opt_list: EDNS options on outgoing packet.
* @param addr: where to.
* @param addrlen: length of addr.
* @param zone: delegation point name.
*/
struct outbound_entry* libworker_send_query(uint8_t* qname, size_t qnamelen,
uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
- int want_dnssec, int nocaps, struct sockaddr_storage* addr,
- socklen_t addrlen, uint8_t* zone, size_t zonelen,
- struct module_qstate* q);
+ int want_dnssec, int nocaps, struct edns_option* opt_list,
+ struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
+ size_t zonelen, struct module_qstate* q);
/** process incoming replies from the network */
int libworker_handle_reply(struct comm_point* c, void* arg, int error,
* @param dnssec: if set, EDNS record will have DO bit set.
* @param want_dnssec: signatures needed.
* @param nocaps: ignore capsforid(if in config), do not perturb qname.
+ * @param opt_list: EDNS options on outgoing packet.
* @param addr: where to.
* @param addrlen: length of addr.
* @param zone: wireformat dname of the zone.
*/
struct outbound_entry* worker_send_query(uint8_t* qname, size_t qnamelen,
uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
- int want_dnssec, int nocaps, struct sockaddr_storage* addr,
- socklen_t addrlen, uint8_t* zone, size_t zonelen,
- struct module_qstate* q);
+ int want_dnssec, int nocaps, struct edns_option* opt_list,
+ struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
+ size_t zonelen, struct module_qstate* q);
/**
* process control messages from the main thread. Frees the control
dname_remove_label(&k.qname, &k.qname_len);
h = query_info_hash(&k, flags);
e = slabhash_lookup(env->msg_cache, h, &k, 0);
+ if(!e && k.qtype != LDNS_RR_TYPE_NS &&
+ env->cfg->qname_minimisation) {
+ k.qtype = LDNS_RR_TYPE_NS;
+ h = query_info_hash(&k, flags);
+ e = slabhash_lookup(env->msg_cache, h, &k, 0);
+ }
if(e) {
struct reply_info* data = (struct reply_info*)e->data;
struct dns_msg* msg;
}
lock_rw_unlock(&e->lock);
}
+ k.qtype = qtype;
}
/* fill common RR types for ANY response to avoid requery */
int
create_udp_sock(int family, int socktype, struct sockaddr* addr,
socklen_t addrlen, int v6only, int* inuse, int* noproto,
- int rcv, int snd, int listen, int* reuseport, int transparent)
+ int rcv, int snd, int listen, int* reuseport, int transparent,
+ int freebind)
{
int s;
-#if defined(SO_REUSEADDR) || defined(SO_REUSEPORT) || defined(IPV6_USE_MIN_MTU) || defined(IP_TRANSPARENT) || defined(IP_BINDANY)
+#if defined(SO_REUSEADDR) || defined(SO_REUSEPORT) || defined(IPV6_USE_MIN_MTU) || defined(IP_TRANSPARENT) || defined(IP_BINDANY) || defined(IP_FREEBIND)
int on=1;
#endif
#ifdef IPV6_MTU
#endif
#if !defined(IP_TRANSPARENT) && !defined(IP_BINDANY)
(void)transparent;
+#endif
+#if !defined(IP_FREEBIND)
+ (void)freebind;
#endif
if((s = socket(family, socktype, 0)) == -1) {
*inuse = 0;
#else
(void)reuseport;
#endif /* defined(SO_REUSEPORT) */
+#ifdef IP_FREEBIND
+ if (freebind &&
+ setsockopt(s, IPPROTO_IP, IP_FREEBIND, (void*)&on,
+ (socklen_t)sizeof(on)) < 0) {
+ log_warn("setsockopt(.. IP_FREEBIND ..) failed: %s",
+ strerror(errno));
+ }
+#endif /* IP_FREEBIND */
#ifdef IP_TRANSPARENT
if (transparent &&
setsockopt(s, IPPROTO_IP, IP_TRANSPARENT, (void*)&on,
#elif defined(IP_BINDANY)
if (transparent &&
setsockopt(s, (family==AF_INET6? IPPROTO_IPV6:IPPROTO_IP),
- IP_BINDANY, (void*)&on, (socklen_t)sizeof(on)) < 0) {
- log_warn("setsockopt(.. IP_BINDANY ..) failed: %s",
- strerror(errno));
+ (family == AF_INET6? IPV6_BINDANY:IP_BINDANY),
+ (void*)&on, (socklen_t)sizeof(on)) < 0) {
+ log_warn("setsockopt(.. IP%s_BINDANY ..) failed: %s",
+ (family==AF_INET6?"V6":""), strerror(errno));
}
#endif /* IP_TRANSPARENT || IP_BINDANY */
}
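The freebind parameter threaded through create_udp_sock and create_tcp_accept_sock only matters at bind time; in isolation the new socket option amounts to the following Linux-specific call (a standalone sketch, not unbound code):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Allow binding to an address that is not (yet) configured on any
 * interface, e.g. a failover or not-yet-raised IP.  Linux only. */
static int set_freebind(int fd)
{
#ifdef IP_FREEBIND
	int on = 1;
	if(setsockopt(fd, IPPROTO_IP, IP_FREEBIND, (void*)&on,
		(socklen_t)sizeof(on)) < 0) {
		fprintf(stderr, "IP_FREEBIND: %s\n", strerror(errno));
		return 0;
	}
	return 1;
#else
	(void)fd;
	return 0; /* option not available on this platform */
#endif
}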
int
create_tcp_accept_sock(struct addrinfo *addr, int v6only, int* noproto,
- int* reuseport, int transparent, int mss)
+ int* reuseport, int transparent, int mss, int freebind)
{
int s;
-#if defined(SO_REUSEADDR) || defined(SO_REUSEPORT) || defined(IPV6_V6ONLY) || defined(IP_TRANSPARENT)
+#if defined(SO_REUSEADDR) || defined(SO_REUSEPORT) || defined(IPV6_V6ONLY) || defined(IP_TRANSPARENT) || defined(IP_BINDANY) || defined(IP_FREEBIND)
int on = 1;
#endif
-#ifndef IP_TRANSPARENT
+#if !defined(IP_TRANSPARENT) && !defined(IP_BINDANY)
(void)transparent;
+#endif
+#if !defined(IP_FREEBIND)
+ (void)freebind;
#endif
verbose_print_addr(addr);
*noproto = 0;
return -1;
}
#endif /* SO_REUSEADDR */
+#ifdef IP_FREEBIND
+ if (freebind && setsockopt(s, IPPROTO_IP, IP_FREEBIND, (void*)&on,
+ (socklen_t)sizeof(on)) < 0) {
+ log_warn("setsockopt(.. IP_FREEBIND ..) failed: %s",
+ strerror(errno));
+ }
+#endif /* IP_FREEBIND */
#ifdef SO_REUSEPORT
/* try to set SO_REUSEPORT so that incoming
* connections are distributed evenly among the receiving threads.
log_warn("setsockopt(.. IP_TRANSPARENT ..) failed: %s",
strerror(errno));
}
-#endif /* IP_TRANSPARENT */
+#elif defined(IP_BINDANY)
+ if (transparent &&
+ setsockopt(s, (addr->ai_family==AF_INET6? IPPROTO_IPV6:IPPROTO_IP),
+ (addr->ai_family == AF_INET6? IPV6_BINDANY:IP_BINDANY),
+ (void*)&on, (socklen_t)sizeof(on)) < 0) {
+ log_warn("setsockopt(.. IP%s_BINDANY ..) failed: %s",
+ (addr->ai_family==AF_INET6?"V6":""), strerror(errno));
+ }
+#endif /* IP_TRANSPARENT || IP_BINDANY */
if(bind(s, addr->ai_addr, addr->ai_addrlen) != 0) {
#ifndef USE_WINSOCK
/* detect freebsd jail with no ipv6 permission */
static int
make_sock(int stype, const char* ifname, const char* port,
struct addrinfo *hints, int v6only, int* noip6, size_t rcv, size_t snd,
- int* reuseport, int transparent, int tcp_mss)
+ int* reuseport, int transparent, int tcp_mss, int freebind)
{
struct addrinfo *res = NULL;
int r, s, inuse, noproto;
s = create_udp_sock(res->ai_family, res->ai_socktype,
(struct sockaddr*)res->ai_addr, res->ai_addrlen,
v6only, &inuse, &noproto, (int)rcv, (int)snd, 1,
- reuseport, transparent);
+ reuseport, transparent, freebind);
if(s == -1 && inuse) {
log_err("bind: address already in use");
} else if(s == -1 && noproto && hints->ai_family == AF_INET6){
}
} else {
s = create_tcp_accept_sock(res, v6only, &noproto, reuseport,
- transparent, tcp_mss);
+ transparent, tcp_mss, freebind);
if(s == -1 && noproto && hints->ai_family == AF_INET6){
*noip6 = 1;
}
static int
make_sock_port(int stype, const char* ifname, const char* port,
struct addrinfo *hints, int v6only, int* noip6, size_t rcv, size_t snd,
- int* reuseport, int transparent, int tcp_mss)
+ int* reuseport, int transparent, int tcp_mss, int freebind)
{
char* s = strchr(ifname, '@');
if(s) {
(void)strlcpy(p, s+1, sizeof(p));
p[strlen(s+1)]=0;
return make_sock(stype, newif, p, hints, v6only, noip6,
- rcv, snd, reuseport, transparent, tcp_mss);
+ rcv, snd, reuseport, transparent, tcp_mss, freebind);
}
return make_sock(stype, ifname, port, hints, v6only, noip6, rcv, snd,
- reuseport, transparent, tcp_mss);
+ reuseport, transparent, tcp_mss, freebind);
}
/**
* set to false on exit if reuseport failed due to no kernel support.
* @param transparent: set IP_TRANSPARENT socket option.
* @param tcp_mss: maximum segment size of tcp socket. default if zero.
+ * @param freebind: set IP_FREEBIND socket option.
* @return: returns false on error.
*/
static int
ports_create_if(const char* ifname, int do_auto, int do_udp, int do_tcp,
struct addrinfo *hints, const char* port, struct listen_port** list,
size_t rcv, size_t snd, int ssl_port, int* reuseport, int transparent,
- int tcp_mss)
+ int tcp_mss, int freebind)
{
int s, noip6=0;
if(!do_udp && !do_tcp)
if(do_auto) {
if((s = make_sock_port(SOCK_DGRAM, ifname, port, hints, 1,
&noip6, rcv, snd, reuseport, transparent,
- tcp_mss)) == -1) {
+ tcp_mss, freebind)) == -1) {
if(noip6) {
log_warn("IPv6 protocol not available");
return 1;
/* regular udp socket */
if((s = make_sock_port(SOCK_DGRAM, ifname, port, hints, 1,
&noip6, rcv, snd, reuseport, transparent,
- tcp_mss)) == -1) {
+ tcp_mss, freebind)) == -1) {
if(noip6) {
log_warn("IPv6 protocol not available");
return 1;
atoi(strchr(ifname, '@')+1) == ssl_port) ||
(!strchr(ifname, '@') && atoi(port) == ssl_port));
if((s = make_sock_port(SOCK_STREAM, ifname, port, hints, 1,
- &noip6, 0, 0, reuseport, transparent, tcp_mss)) == -1) {
+ &noip6, 0, 0, reuseport, transparent, tcp_mss,
+ freebind)) == -1) {
if(noip6) {
/*log_warn("IPv6 protocol not available");*/
return 1;
cfg->so_rcvbuf, cfg->so_sndbuf,
cfg->ssl_port, reuseport,
cfg->ip_transparent,
- cfg->tcp_mss)) {
+ cfg->tcp_mss, cfg->ip_freebind)) {
listening_ports_free(list);
return NULL;
}
cfg->so_rcvbuf, cfg->so_sndbuf,
cfg->ssl_port, reuseport,
cfg->ip_transparent,
- cfg->tcp_mss)) {
+ cfg->tcp_mss, cfg->ip_freebind)) {
listening_ports_free(list);
return NULL;
}
cfg->so_rcvbuf, cfg->so_sndbuf,
cfg->ssl_port, reuseport,
cfg->ip_transparent,
- cfg->tcp_mss)) {
+ cfg->tcp_mss, cfg->ip_freebind)) {
listening_ports_free(list);
return NULL;
}
cfg->so_rcvbuf, cfg->so_sndbuf,
cfg->ssl_port, reuseport,
cfg->ip_transparent,
- cfg->tcp_mss)) {
+ cfg->tcp_mss, cfg->ip_freebind)) {
listening_ports_free(list);
return NULL;
}
* @param reuseport: if nonNULL and true, try to set SO_REUSEPORT on
* listening UDP port. Set to false on return if it failed to do so.
* @param transparent: set IP_TRANSPARENT socket option.
+ * @param freebind: set IP_FREEBIND socket option.
* @return: the socket. -1 on error.
*/
int create_udp_sock(int family, int socktype, struct sockaddr* addr,
socklen_t addrlen, int v6only, int* inuse, int* noproto, int rcv,
- int snd, int listen, int* reuseport, int transparent);
+ int snd, int listen, int* reuseport, int transparent, int freebind);
/**
* Create and bind TCP listening socket
* listening UDP port. Set to false on return if it failed to do so.
* @param transparent: set IP_TRANSPARENT socket option.
* @param mss: maximum segment size of the socket. if zero, leaves the default.
+ * @param freebind: set IP_FREEBIND socket option.
* @return: the socket. -1 on error.
*/
int create_tcp_accept_sock(struct addrinfo *addr, int v6only, int* noproto,
- int* reuseport, int transparent, int mss);
+ int* reuseport, int transparent, int mss, int freebind);
/**
* Create and bind local listening socket
lock_rw_destroy(&z->lock);
regional_destroy(z->region);
free(z->name);
+ free(z->taglist);
free(z);
}
{
struct local_zone* z = local_zone_create(nm, len, labs, t, c);
if(!z) {
+ free(nm);
log_err("out of memory");
return NULL;
}
return r;
}
+/** enter tagstring into zone */
+static int
+lz_enter_zone_tag(struct local_zones* zones, char* zname, uint8_t* list,
+ size_t len, uint16_t rr_class)
+{
+ uint8_t dname[LDNS_MAX_DOMAINLEN+1];
+ size_t dname_len = sizeof(dname);
+ int dname_labs, r = 0;
+ struct local_zone* z;
+
+ if(sldns_str2wire_dname_buf(zname, dname, &dname_len) != 0) {
+ log_err("cannot parse zone name in local-zone-tag: %s", zname);
+ return 0;
+ }
+ dname_labs = dname_count_labels(dname);
+
+ lock_rw_rdlock(&zones->lock);
+ z = local_zones_lookup(zones, dname, dname_len, dname_labs, rr_class);
+ if(!z) {
+ lock_rw_unlock(&zones->lock);
+ log_err("no local-zone for tag %s", zname);
+ return 0;
+ }
+ lock_rw_wrlock(&z->lock);
+ lock_rw_unlock(&zones->lock);
+ free(z->taglist);
+ z->taglist = memdup(list, len);
+ z->taglen = len;
+ if(z->taglist)
+ r = 1;
+ lock_rw_unlock(&z->lock);
+ return r;
+}
+
/** parse local-zone: statements */
static int
lz_enter_zones(struct local_zones* zones, struct config_file* cfg)
return 1;
}
+/** enter local-zone-tag info */
+static int
+lz_enter_zone_tags(struct local_zones* zones, struct config_file* cfg)
+{
+ struct config_strbytelist* p;
+ int c = 0;
+ for(p = cfg->local_zone_tags; p; p = p->next) {
+ if(!lz_enter_zone_tag(zones, p->str, p->str2, p->str2len,
+ LDNS_RR_CLASS_IN))
+ return 0;
+ c++;
+ }
+ if(c) verbose(VERB_ALGO, "applied tags to %d local zones", c);
+ return 1;
+}
+
/** enter auth data */
static int
lz_enter_data(struct local_zones* zones, struct config_file* cfg)
/* setup parent ptrs for lookup during data entry */
init_parents(zones);
+ /* insert local zone tags */
+ if(!lz_enter_zone_tags(zones, cfg)) {
+ return 0;
+ }
/* insert local data */
if(!lz_enter_data(zones, cfg)) {
return 0;
edns->udp_size = EDNS_ADVERTISED_SIZE;
edns->ext_rcode = 0;
edns->bits &= EDNS_DO;
- if(!reply_info_answer_encode(qinfo, &rep,
+ if(!edns_opt_inplace_reply(edns, temp) ||
+ !reply_info_answer_encode(qinfo, &rep,
*(uint16_t*)sldns_buffer_begin(buf),
sldns_buffer_read_u16_at(buf, 2),
buf, 0, 0, temp, udpsize, edns,
{
/* create */
struct local_zone* z = local_zone_create(name, len, labs, tp, dclass);
- if(!z) return NULL;
+ if(!z) {
+ free(name);
+ return NULL;
+ }
lock_rw_wrlock(&z->lock);
/* find the closest parent */
/** how to process zone */
enum localzone_type type;
+ /** tag bitlist */
+ uint8_t* taglist;
+ /** length of the taglist (in bytes) */
+ size_t taglen;
/** in this region the zone's data is allocated.
* the struct local_zone itself is malloced. */
s = mesh_state_create(mesh->env, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
if(!s) {
log_err("mesh_state_create: out of memory; SERVFAIL");
+ if(!edns_opt_inplace_reply(edns, mesh->env->scratch))
+ edns->opt_list = NULL;
error_encode(rep->c->buffer, LDNS_RCODE_SERVFAIL,
qinfo, qid, qflags, edns);
comm_point_send_reply(rep);
/* add reply to s */
if(!mesh_state_add_reply(s, edns, rep, qid, qflags, qinfo->qname)) {
log_err("mesh_new_client: out of memory; SERVFAIL");
+ if(!edns_opt_inplace_reply(edns, mesh->env->scratch))
+ edns->opt_list = NULL;
error_encode(rep->c->buffer, LDNS_RCODE_SERVFAIL,
qinfo, qid, qflags, edns);
comm_point_send_reply(rep);
r->edns.udp_size = EDNS_ADVERTISED_SIZE;
r->edns.ext_rcode = 0;
r->edns.bits &= EDNS_DO;
- if(!reply_info_answer_encode(&m->s.qinfo, rep, r->qid,
+ if(!edns_opt_inplace_reply(&r->edns, m->s.region) ||
+ !reply_info_answer_encode(&m->s.qinfo, rep, r->qid,
r->qflags, r->buf, 0, 1,
m->s.env->scratch, udp_size, &r->edns,
(int)(r->edns.bits & EDNS_DO), secure))
if(prev && prev->qflags == r->qflags &&
prev->edns.edns_present == r->edns.edns_present &&
prev->edns.bits == r->edns.bits &&
- prev->edns.udp_size == r->edns.udp_size) {
+ prev->edns.udp_size == r->edns.udp_size &&
+ edns_opt_list_compare(prev->edns.opt_list, r->edns.opt_list)
+ == 0) {
/* if the previous reply is identical to this one, fix ID */
if(prev->query_reply.c->buffer != r->query_reply.c->buffer)
sldns_buffer_copy(r->query_reply.c->buffer,
r->edns.ext_rcode = 0;
r->edns.bits &= EDNS_DO;
m->s.qinfo.qname = r->qname;
- if(!reply_info_answer_encode(&m->s.qinfo, rep, r->qid,
+ if(!edns_opt_inplace_reply(&r->edns, m->s.region) ||
+ !reply_info_answer_encode(&m->s.qinfo, rep, r->qid,
r->qflags, r->query_reply.c->buffer, 0, 1,
m->s.env->scratch, udp_size, &r->edns,
(int)(r->edns.bits & EDNS_DO), secure))
r->cb = cb;
r->cb_arg = cb_arg;
r->edns = *edns;
+ if(edns->opt_list) {
+ r->edns.opt_list = edns_opt_copy_region(edns->opt_list,
+ s->s.region);
+ if(!r->edns.opt_list)
+ return 0;
+ }
r->qid = qid;
r->qflags = qflags;
r->next = s->cb_list;
return 0;
r->query_reply = *rep;
r->edns = *edns;
+ if(edns->opt_list) {
+ r->edns.opt_list = edns_opt_copy_region(edns->opt_list,
+ s->s.region);
+ if(!r->edns.opt_list)
+ return 0;
+ }
r->qid = qid;
r->qflags = qflags;
r->start_time = *s->s.env->now_tv;
return 0;
s->reply_list = r;
return 1;
-
}
/**
#ifdef WITH_PYTHONMODULE
#include "pythonmod/pythonmod.h"
#endif
+#ifdef USE_CACHEDB
+#include "cachedb/cachedb.h"
+#endif
/** count number of modules (words) in the string */
static int
"dns64",
#ifdef WITH_PYTHONMODULE
"python",
+#endif
+#ifdef USE_CACHEDB
+ "cachedb",
#endif
"validator",
"iterator",
&dns64_get_funcblock,
#ifdef WITH_PYTHONMODULE
&pythonmod_get_funcblock,
+#endif
+#ifdef USE_CACHEDB
+ &cachedb_get_funcblock,
#endif
&val_get_funcblock,
&iter_get_funcblock,
}
if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
return r;
+ if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
+ return r;
return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
}
struct service_callback* p = sq->cblist, *np;
free(sq->qbuf);
free(sq->zone);
+ edns_opt_list_free(sq->opt_list);
while(p) {
np = p->next;
free(p);
sa->sin6_port = (in_port_t)htons((uint16_t)port);
fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
(struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
- 0, 0, 0, NULL, 0);
+ 0, 0, 0, NULL, 0, 0);
} else {
struct sockaddr_in* sa = (struct sockaddr_in*)addr;
sa->sin_port = (in_port_t)htons((uint16_t)port);
fd = create_udp_sock(AF_INET, SOCK_DGRAM,
(struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
- 0, 0, 0, NULL, 0);
+ 0, 0, 0, NULL, 0, 0);
}
return fd;
}
/** lookup serviced query in serviced query rbtree */
static struct serviced_query*
lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
- struct sockaddr_storage* addr, socklen_t addrlen)
+ struct sockaddr_storage* addr, socklen_t addrlen,
+ struct edns_option* opt_list)
{
struct serviced_query key;
key.node.key = &key;
memcpy(&key.addr, addr, addrlen);
key.addrlen = addrlen;
key.outnet = outnet;
+ key.opt_list = opt_list;
return (struct serviced_query*)rbtree_search(outnet->serviced, &key);
}
serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream,
struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
- size_t zonelen, int qtype)
+ size_t zonelen, int qtype, struct edns_option* opt_list)
{
struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq));
#ifdef UNBOUND_DEBUG
sq->ssl_upstream = ssl_upstream;
memcpy(&sq->addr, addr, addrlen);
sq->addrlen = addrlen;
+ sq->opt_list = NULL;
+ if(opt_list) {
+ sq->opt_list = edns_opt_copy_alloc(opt_list);
+ if(!sq->opt_list) {
+ free(sq->zone);
+ free(sq->qbuf);
+ free(sq);
+ return NULL;
+ }
+ }
sq->outnet = outnet;
sq->cblist = NULL;
sq->pending = NULL;
edns.edns_present = 1;
edns.ext_rcode = 0;
edns.edns_version = EDNS_ADVERTISED_VERSION;
+ edns.opt_list = sq->opt_list;
if(sq->status == serviced_query_UDP_EDNS_FRAG) {
if(addr_is_ip6(&sq->addr, sq->addrlen)) {
if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE)
sq->to_be_deleted = 1;
verbose(VERB_ALGO, "svcd callbacks start");
if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c &&
- !sq->nocaps) {
+ !sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) {
+ /* for type PTR do not check perturbed name in answer,
+ * compatibility with cisco dns guard boxes that mess up
+ * reverse queries 0x20 contents */
/* noerror and nxdomain must have a qname in reply */
if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 &&
(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
return sq->pending != NULL;
}
+/* see if packet is edns malformed; got zeroes at start.
+ * This is from servers that return malformed packets to EDNS0 queries,
+ * but they return good packets for nonEDNS0 queries.
+ * We try to detect their output without resorting to a full parse or
+ * a check for too many bytes after the end of the packet. */
+static int
+packet_edns_malformed(struct sldns_buffer* buf, int qtype)
+{
+ size_t len;
+ if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE)
+ return 1; /* malformed */
+ /* they have NOERROR rcode, 1 answer. */
+ if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR)
+ return 0;
+ /* one query (to skip) and answer records */
+ if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 ||
+ LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0)
+ return 0;
+ /* skip qname */
+ len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE),
+ sldns_buffer_limit(buf)-LDNS_HEADER_SIZE);
+ if(len == 0)
+ return 0;
+ if(len == 1 && qtype == 0)
+ return 0; /* we asked for '.' and type 0 */
+ /* and then 4 bytes (type and class of query) */
+ if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3)
+ return 0;
+
+ /* and start with 11 zeroes as the answer RR */
+ /* so check the qtype of the answer record, qname=0, type=0 */
+ if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 &&
+ sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 &&
+ sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0)
+ return 1;
+ return 0;
+}
+
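In concrete terms, the heuristic flags a reply as EDNS-mangled only when it claims NOERROR with QDCOUNT 1 and at least one answer, yet the record immediately after the question starts with three zero octets, i.e. a root owner name followed by a zero type field; a genuine answer only looks like that when the query itself asked for the root name with type 0, which the check above excludes.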
int
serviced_udp_callback(struct comm_point* c, void* arg, int error,
struct comm_reply* rep)
||sq->status == serviced_query_UDP_EDNS_FRAG)
&& (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
== LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(
- sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL)) {
+ sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL
+ || packet_edns_malformed(c->buffer, sq->qtype)
+ )) {
/* try to get an answer by falling back without EDNS */
verbose(VERB_ALGO, "serviced query: attempt without EDNS");
sq->status = serviced_query_UDP_EDNS_fallback;
outnet_serviced_query(struct outside_network* outnet,
uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
uint16_t flags, int dnssec, int want_dnssec, int nocaps,
- int tcp_upstream, int ssl_upstream, struct sockaddr_storage* addr,
- socklen_t addrlen, uint8_t* zone, size_t zonelen,
- comm_point_callback_t* callback, void* callback_arg,
+ int tcp_upstream, int ssl_upstream, struct edns_option* opt_list,
+ struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
+ size_t zonelen, comm_point_callback_t* callback, void* callback_arg,
sldns_buffer* buff)
{
struct serviced_query* sq;
struct service_callback* cb;
serviced_gen_query(buff, qname, qnamelen, qtype, qclass, flags);
- sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen);
+ sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen, opt_list);
/* duplicate entries are included in the callback list, because
* there is a counterpart registration by our caller that needs to
* be doubly-removed (with callbacks perhaps). */
/* make new serviced query entry */
sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
tcp_upstream, ssl_upstream, addr, addrlen, zone,
- zonelen, (int)qtype);
+ zonelen, (int)qtype, opt_list);
if(!sq) {
free(cb);
return NULL;
callback_list_remove(sq, cb_arg);
/* if callbacks() routine scheduled deletion, let it do that */
if(!sq->cblist && !sq->to_be_deleted) {
-#ifdef UNBOUND_DEBUG
- rbnode_t* rem =
-#else
- (void)
-#endif
- rbtree_delete(sq->outnet->serviced, sq);
- log_assert(rem); /* should be present */
+ (void)rbtree_delete(sq->outnet->serviced, sq);
serviced_delete(sq);
}
}
struct sldns_buffer;
struct serviced_query;
struct dt_env;
+struct edns_option;
/**
* Send queries to outside servers and wait for answers from servers.
int last_rtt;
/** do we know edns probe status already, for UDP_EDNS queries */
int edns_lame_known;
+ /** edns options to use for sending upstream packet */
+ struct edns_option* opt_list;
/** outside network this is part of */
struct outside_network* outnet;
/** list of interested parties that need callback on results. */
* @param nocaps: ignore use_caps_for_id and use unperturbed qname.
* @param tcp_upstream: use TCP for upstream queries.
* @param ssl_upstream: use SSL for upstream queries.
+ * @param opt_list: pass edns option list (deep copied into the serviced
+ * query); these options are set on the outgoing packets.
* @param callback: callback function.
* @param callback_arg: user argument to callback function.
* @param addr: to which server to send the query.
struct serviced_query* outnet_serviced_query(struct outside_network* outnet,
uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
uint16_t flags, int dnssec, int want_dnssec, int nocaps,
- int tcp_upstream, int ssl_upstream, struct sockaddr_storage* addr,
- socklen_t addrlen, uint8_t* zone, size_t zonelen,
- comm_point_callback_t* callback, void* callback_arg,
+ int tcp_upstream, int ssl_upstream, struct edns_option* opt_list,
+ struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
+ size_t zonelen, comm_point_callback_t* callback, void* callback_arg,
struct sldns_buffer* buff);
/**
offset += length;
Y = BN_bin2bn(key+offset, (int)length, NULL);
- offset += length;
/* create the key and set its properties */
if(!Q || !P || !G || !Y || !(dsa = DSA_new())) {
target[o+1] = b64[ ((src[i]&0x03)<<4) | (src[i+1]>>4) ];
target[o+2] = b64[ ((src[i+1]&0x0f)<<2) ];
target[o+3] = pad64;
- i += 2;
+ /* i += 2; */
o += 4;
break;
case 1:
target[o+1] = b64[ ((src[i]&0x03)<<4) ];
target[o+2] = pad64;
target[o+3] = pad64;
- i += 1;
+ /* i += 1; */
o += 4;
break;
case 0:
LDNS_EDNS_DAU = 5, /* RFC6975 */
LDNS_EDNS_DHU = 6, /* RFC6975 */
LDNS_EDNS_N3U = 7, /* RFC6975 */
- LDNS_EDNS_CLIENT_SUBNET = 8 /* draft-vandergaast-edns-client-subnet */
+ LDNS_EDNS_CLIENT_SUBNET = 8, /* draft-vandergaast-edns-client-subnet */
+ LDNS_EDNS_PADDING = 12 /* RFC7830 */
};
typedef enum sldns_enum_edns_option sldns_edns_option;
parse_state?parse_state->default_ttl:0,
(parse_state&&parse_state->origin_len)?
parse_state->origin:NULL,
- parse_state->origin_len,
+ parse_state?parse_state->origin_len:0,
(parse_state&&parse_state->prev_rr_len)?
parse_state->prev_rr:NULL,
- parse_state->prev_rr_len);
+ parse_state?parse_state->prev_rr_len:0);
}
return LDNS_WIREPARSE_ERR_OK;
}
{ 6, "DHU" },
{ 7, "N3U" },
{ 8, "edns-client-subnet" },
+ { 12, "Padding" },
{ 0, NULL}
};
sldns_lookup_table* sldns_edns_options = sldns_edns_options_data;
case LDNS_EDNS_CLIENT_SUBNET:
w += sldns_wire2str_edns_subnet_print(s, sl, optdata, optlen);
break;
+ case LDNS_EDNS_PADDING:
+ w += print_hex_buf(s, sl, optdata, optlen);
+ break;
default:
/* unknown option code */
w += print_hex_buf(s, sl, optdata, optlen);
&& strcmp(cfg->module_conf, "dns64 validator python iterator") != 0
&& strcmp(cfg->module_conf, "python dns64 iterator") != 0
&& strcmp(cfg->module_conf, "python dns64 validator iterator") != 0
+#endif
+#ifdef USE_CACHEDB
+ && strcmp(cfg->module_conf, "validator cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "dns64 validator cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "dns64 cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "python dns64 cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "python dns64 validator cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "dns64 python cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "dns64 python validator cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "python cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "python validator cachedb iterator") != 0
+ && strcmp(cfg->module_conf, "cachedb python iterator") != 0
+ && strcmp(cfg->module_conf, "validator cachedb python iterator") != 0
+ && strcmp(cfg->module_conf, "validator python cachedb iterator") != 0
#endif
) {
fatal_exit("module conf '%s' is not known to work",
printf(" flush_negative flush all negative data\n");
printf(" flush_stats flush statistics, make zero\n");
printf(" flush_requestlist drop queries that are worked on\n");
- printf(" dump_requestlist show what is worked on\n");
+ printf(" dump_requestlist show what is worked on by first thread\n");
printf(" flush_infra [all | ip] remove ping, edns for one IP or all\n");
printf(" dump_infra show ping and edns entries\n");
printf(" set_option opt: val set option to value, no reload\n");
size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
- int ATTR_UNUSED(nocaps), struct sockaddr_storage* ATTR_UNUSED(addr),
+ int ATTR_UNUSED(nocaps), struct edns_option* ATTR_UNUSED(opt_list),
+ struct sockaddr_storage* ATTR_UNUSED(addr),
socklen_t ATTR_UNUSED(addrlen), uint8_t* ATTR_UNUSED(zone),
size_t ATTR_UNUSED(zonelen), struct module_qstate* ATTR_UNUSED(q))
{
size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
- int ATTR_UNUSED(nocaps), struct sockaddr_storage* ATTR_UNUSED(addr),
+ int ATTR_UNUSED(nocaps), struct edns_option* ATTR_UNUSED(opt_list),
+ struct sockaddr_storage* ATTR_UNUSED(addr),
socklen_t ATTR_UNUSED(addrlen), uint8_t* ATTR_UNUSED(zone),
size_t ATTR_UNUSED(zonelen), struct module_qstate* ATTR_UNUSED(q))
{
cfg->so_sndbuf = 0;
cfg->so_reuseport = 0;
cfg->ip_transparent = 0;
+ cfg->ip_freebind = 0;
cfg->num_ifs = 0;
cfg->ifs = NULL;
cfg->num_out_ifs = 0;
if(!(cfg->dnstap_socket_path = strdup(DNSTAP_SOCKET_PATH)))
goto error_exit;
#endif
+ cfg->disable_dnssec_lame_check = 0;
cfg->ratelimit = 0;
cfg->ratelimit_slabs = 4;
cfg->ratelimit_size = 4*1024*1024;
else S_MEMSIZE("so-sndbuf:", so_sndbuf)
else S_YNO("so-reuseport:", so_reuseport)
else S_YNO("ip-transparent:", ip_transparent)
+ else S_YNO("ip-freebind:", ip_freebind)
else S_MEMSIZE("rrset-cache-size:", rrset_cache_size)
else S_POW2("rrset-cache-slabs:", rrset_cache_slabs)
else S_YNO("prefetch:", prefetch)
else S_STR("control-cert-file:", control_cert_file)
else S_STR("module-config:", module_conf)
else S_STR("python-script:", python_script)
+ else S_YNO("disable-dnssec-lame-check:", disable_dnssec_lame_check)
else if(strcmp(opt, "ratelimit:") == 0) {
IS_NUMBER_OR_ZERO; cfg->ratelimit = atoi(val);
infra_dp_ratelimit=cfg->ratelimit;
else S_POW2("ratelimit-slabs:", ratelimit_slabs)
else S_NUMBER_OR_ZERO("ratelimit-factor:", ratelimit_factor)
else S_YNO("qname-minimisation:", qname_minimisation)
+ else if(strcmp(opt, "define-tag:") ==0) {
+ return config_add_tag(cfg, val);
/* val_sig_skew_min and max are copied into val_env during init,
* so this does not update val_env with set_option */
- else if(strcmp(opt, "val-sig-skew-min:") == 0)
+ } else if(strcmp(opt, "val-sig-skew-min:") == 0)
{ IS_NUMBER_OR_ZERO; cfg->val_sig_skew_min = (int32_t)atoi(val); }
else if(strcmp(opt, "val-sig-skew-max:") == 0)
{ IS_NUMBER_OR_ZERO; cfg->val_sig_skew_max = (int32_t)atoi(val); }
* stub-zone, name, stub-addr, stub-host, stub-prime
* forward-first, stub-first,
* forward-zone, name, forward-addr, forward-host,
- * ratelimit-for-domain, ratelimit-below-domain */
+ * ratelimit-for-domain, ratelimit-below-domain,
+ * local-zone-tag */
return 0;
}
return 1;
/** compare and print list option */
#define O_LS2(opt, name, lst) if(strcmp(opt, name)==0) { \
struct config_str2list* p = cfg->lst; \
- for(p = cfg->lst; p; p = p->next) \
- snprintf(buf, len, "%s %s\n", p->str, p->str2); \
+ for(p = cfg->lst; p; p = p->next) { \
+ snprintf(buf, len, "%s %s", p->str, p->str2); \
func(buf, arg); \
+ } \
+ }
+/** compare and print taglist option */
+#define O_LTG(opt, name, lst) if(strcmp(opt, name)==0) { \
+ char* tmpstr = NULL; \
+ struct config_strbytelist *p = cfg->lst; \
+ for(p = cfg->lst; p; p = p->next) {\
+ tmpstr = config_taglist2str(cfg, p->str2, p->str2len); \
+ if(tmpstr) {\
+ snprintf(buf, len, "%s %s", p->str, tmpstr); \
+ func(buf, arg); \
+ free(tmpstr); \
+ } \
+ } \
}
int
else O_MEM(opt, "so-sndbuf", so_sndbuf)
else O_YNO(opt, "so-reuseport", so_reuseport)
else O_YNO(opt, "ip-transparent", ip_transparent)
+ else O_YNO(opt, "ip-freebind", ip_freebind)
else O_MEM(opt, "rrset-cache-size", rrset_cache_size)
else O_DEC(opt, "rrset-cache-slabs", rrset_cache_slabs)
else O_YNO(opt, "prefetch-key", prefetch_key)
else O_YNO(opt, "insecure-lan-zones", insecure_lan_zones)
else O_DEC(opt, "max-udp-size", max_udp_size)
else O_STR(opt, "python-script", python_script)
+ else O_YNO(opt, "disable-dnssec-lame-check", disable_dnssec_lame_check)
else O_DEC(opt, "ratelimit", ratelimit)
else O_MEM(opt, "ratelimit-size", ratelimit_size)
else O_DEC(opt, "ratelimit-slabs", ratelimit_slabs)
else O_DEC(opt, "val-sig-skew-min", val_sig_skew_min)
else O_DEC(opt, "val-sig-skew-max", val_sig_skew_max)
else O_YNO(opt, "qname-minimisation", qname_minimisation)
+ else O_IFC(opt, "define-tag", num_tags, tagname)
+ else O_LTG(opt, "local-zone-tag", local_zone_tags)
/* not here:
* outgoing-permit, outgoing-avoid - have list of ports
* local-zone - zones and nodefault variables
}
}
+/** delete string array */
+static void
+config_del_strarray(char** array, int num)
+{
+ int i;
+ if(!array)
+ return;
+ for(i=0; i<num; i++) {
+ free(array[i]);
+ }
+ free(array);
+}
+
+/** delete stringbytelist */
+static void
+config_del_strbytelist(struct config_strbytelist* p)
+{
+ struct config_strbytelist* np;
+ while(p) {
+ np = p->next;
+ free(p->str);
+ free(p->str2);
+ free(p);
+ p = np;
+ }
+}
+
void
config_delete(struct config_file* cfg)
{
free(cfg->target_fetch_policy);
free(cfg->ssl_service_key);
free(cfg->ssl_service_pem);
- if(cfg->ifs) {
- int i;
- for(i=0; i<cfg->num_ifs; i++)
- free(cfg->ifs[i]);
- free(cfg->ifs);
- }
- if(cfg->out_ifs) {
- int i;
- for(i=0; i<cfg->num_out_ifs; i++)
- free(cfg->out_ifs[i]);
- free(cfg->out_ifs);
- }
+ config_del_strarray(cfg->ifs, cfg->num_ifs);
+ config_del_strarray(cfg->out_ifs, cfg->num_out_ifs);
config_delstubs(cfg->stubs);
config_delstubs(cfg->forwards);
config_delstrlist(cfg->donotqueryaddrs);
config_deldblstrlist(cfg->local_zones);
config_delstrlist(cfg->local_zones_nodefault);
config_delstrlist(cfg->local_data);
+ config_del_strarray(cfg->tagname, cfg->num_tags);
+ config_del_strbytelist(cfg->local_zone_tags);
config_delstrlist(cfg->control_ifs);
free(cfg->server_key_file);
free(cfg->server_cert_file);
return 1;
}
+int
+cfg_strbytelist_insert(struct config_strbytelist** head, char* item,
+ uint8_t* i2, size_t i2len)
+{
+ struct config_strbytelist* s;
+ if(!item || !i2 || !head)
+ return 0;
+ s = (struct config_strbytelist*)calloc(1, sizeof(*s));
+ if(!s)
+ return 0;
+ s->str = item;
+ s->str2 = i2;
+ s->str2len = i2len;
+ s->next = *head;
+ *head = s;
+ return 1;
+}
+
time_t
cfg_convert_timeval(const char* str)
{
return 1;
}
+int
+find_tag_id(struct config_file* cfg, const char* tag)
+{
+ int i;
+ for(i=0; i<cfg->num_tags; i++) {
+ if(strcmp(cfg->tagname[i], tag) == 0)
+ return i;
+ }
+ return -1;
+}
+
+int
+config_add_tag(struct config_file* cfg, const char* tag)
+{
+ char** newarray;
+ char* newtag;
+ if(find_tag_id(cfg, tag) != -1)
+ return 1; /* nothing to do */
+ newarray = (char**)malloc(sizeof(char*)*(cfg->num_tags+1));
+ if(!newarray)
+ return 0;
+ newtag = strdup(tag);
+ if(!newtag) {
+ free(newarray);
+ return 0;
+ }
+ if(cfg->tagname) {
+ memcpy(newarray, cfg->tagname, sizeof(char*)*cfg->num_tags);
+ free(cfg->tagname);
+ }
+ newarray[cfg->num_tags++] = newtag;
+ cfg->tagname = newarray;
+ return 1;
+}
+
+/** set a bit in a bit array */
+static void
+cfg_set_bit(uint8_t* bitlist, size_t len, int id)
+{
+ int pos = id/8;
+ log_assert((size_t)pos < len);
+ bitlist[pos] |= 1<<(id%8);
+}
+
+uint8_t* config_parse_taglist(struct config_file* cfg, char* str,
+ size_t* listlen)
+{
+ uint8_t* taglist = NULL;
+ size_t len = 0;
+ char* p, *s;
+
+ /* allocate */
+ if(cfg->num_tags == 0) {
+ log_err("parse taglist, but no tags defined");
+ return 0;
+ }
+ len = (size_t)(cfg->num_tags+7)/8;
+ taglist = calloc(1, len);
+ if(!taglist) {
+ log_err("out of memory");
+ return 0;
+ }
+
+ /* parse */
+ s = str;
+ while((p=strsep(&s, " \t\n")) != NULL) {
+ if(*p) {
+ int id = find_tag_id(cfg, p);
+ /* set this bit in the bitlist */
+ if(id == -1) {
+ log_err("unknown tag: %s", p);
+ free(taglist);
+ return 0;
+ }
+ cfg_set_bit(taglist, len, id);
+ }
+ }
+
+ *listlen = len;
+ return taglist;
+}
+
+char* config_taglist2str(struct config_file* cfg, uint8_t* taglist,
+ size_t taglen)
+{
+ char buf[10240];
+ size_t i, j, len = 0;
+ buf[0] = 0;
+ for(i=0; i<taglen; i++) {
+ if(taglist[i] == 0)
+ continue;
+ for(j=0; j<8; j++) {
+ if((taglist[i] & (1<<j)) != 0) {
+ size_t id = i*8 + j;
+ snprintf(buf+len, sizeof(buf)-len, "%s%s",
+ (len==0?"":" "), cfg->tagname[id]);
+ len += strlen(buf+len);
+ }
+ }
+ }
+ return strdup(buf);
+}
+
+int taglist_intersect(uint8_t* list1, size_t list1len, uint8_t* list2,
+ size_t list2len)
+{
+ size_t i;
+ if(!list1 || !list2)
+ return 0;
+ for(i=0; i<list1len && i<list2len; i++) {
+ if((list1[i] & list2[i]) != 0)
+ return 1;
+ }
+ return 0;
+}
+
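The tag machinery above is plain bit arithmetic: define-tag assigns each tag name an id, local-zone-tag stores a bitlist per zone, and taglist_intersect reports whether two such bitlists share a tag. A standalone sketch (not unbound code) of that layout, where tag id N occupies bit N%8 of byte N/8:

#include <stdint.h>
#include <stdio.h>

/* set the bit for tag id 'id' in a bitlist */
static void set_tag(uint8_t* list, int id)
{
	list[id/8] |= (uint8_t)(1 << (id%8));
}

/* true if the two bitlists share at least one tag */
static int lists_intersect(const uint8_t* a, const uint8_t* b, size_t alen,
	size_t blen)
{
	size_t i;
	for(i = 0; i < alen && i < blen; i++)
		if(a[i] & b[i])
			return 1;
	return 0;
}

int main(void)
{
	/* e.g. define-tag: "internal external" gives ids 0 and 1 */
	uint8_t zone_tags[1] = {0}, other_tags[1] = {0};
	set_tag(zone_tags, 0);   /* zone tagged "internal" */
	set_tag(other_tags, 1);  /* other list tagged "external" */
	printf("%d\n", lists_intersect(zone_tags, other_tags, 1, 1)); /* 0 */
	set_tag(other_tags, 0);
	printf("%d\n", lists_intersect(zone_tags, other_tags, 1, 1)); /* 1 */
	return 0;
}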
void
config_apply(struct config_file* config)
{
struct config_stub;
struct config_strlist;
struct config_str2list;
+struct config_strbytelist;
struct module_qstate;
struct sock_list;
struct ub_packed_rrset_key;
int so_reuseport;
/** IP_TRANSPARENT socket option requested on port 53 sockets */
int ip_transparent;
+ /** IP_FREEBIND socket option requested on port 53 sockets */
+ int ip_freebind;
/** number of interfaces to open. If 0 default all interfaces. */
int num_ifs;
int unblock_lan_zones;
/** insecure lan zones (don't validate AS112 zones) */
int insecure_lan_zones;
+ /** list of zonename, tagbitlist */
+ struct config_strbytelist* local_zone_tags;
+ /** tag list, array with tagname[i] is malloced string */
+ char** tagname;
+ /** number of items in the taglist */
+ int num_tags;
/** remote control section. enable toggle. */
int remote_control_enable;
/** true to log dnstap FORWARDER_RESPONSE message events */
int dnstap_log_forwarder_response_messages;
+ /** true to disable DNSSEC lameness check in iterator */
+ int disable_dnssec_lame_check;
+
/** ratelimit 0 is off, otherwise qps (unless overridden) */
int ratelimit;
/** number of slabs for ratelimit cache */
char* str2;
};
+/**
+ * List of string, bytestring for config options
+ */
+struct config_strbytelist {
+ /** next item in list */
+ struct config_strbytelist* next;
+ /** first string */
+ char* str;
+ /** second bytestring */
+ uint8_t* str2;
+ size_t str2len;
+};
+
/** List head for strlist processing, used for append operation. */
struct config_strlist_head {
/** first in list of text items */
*/
int cfg_str2list_insert(struct config_str2list** head, char* item, char* i2);
+/**
+ * Insert string into strbytelist.
+ * @param head: pointer to strbytelist head variable.
+ * @param item: new item. malloced by caller. If NULL the insertion fails.
+ * @param i2: 2nd string, malloced by caller. If NULL the insertion fails.
+ * @param i2len: length of the i2 bytestring.
+ * @return: true on success.
+ */
+int cfg_strbytelist_insert(struct config_strbytelist** head, char* item,
+ uint8_t* i2, size_t i2len);
+
/**
* Find stub in config list, also returns prevptr (for deletion).
* @param pp: call routine with pointer to a pointer to the start of the list,
*/
int cfg_parse_memsize(const char* str, size_t* res);
+/**
+ * Add a tag name to the config. It is added at the end with a new ID value.
+ * @param cfg: the config structure.
+ * @param tag: string (which is copied) with the name.
+ * @return: false on alloc failure.
+ */
+int config_add_tag(struct config_file* cfg, const char* tag);
+
+/**
+ * Find tag ID in the tag list.
+ * @param cfg: the config structure.
+ * @param tag: string with tag name to search for.
+ * @return: 0..(num_tags-1) with tag ID, or -1 if tagname is not found.
+ */
+int find_tag_id(struct config_file* cfg, const char* tag);
+
+/**
+ * parse taglist from string into bytestring with bitlist.
+ * @param cfg: the config structure (with tagnames)
+ * @param str: the string to parse. Parse puts 0 bytes in string.
+ * @param listlen: returns the length of the bitlist in bytes.
+ * @return malloced bytes with a bitlist of the tags. or NULL on parse error
+ * or malloc failure.
+ */
+uint8_t* config_parse_taglist(struct config_file* cfg, char* str,
+ size_t* listlen);
+
+/**
+ * convert tag bitlist to a malloced string with tag names. For debug output.
+ * @param cfg: the config structure (with tagnames)
+ * @param taglist: the tag bitlist.
+ * @param len: length of the tag bitlist.
+ * @return malloced string or NULL.
+ */
+char* config_taglist2str(struct config_file* cfg, uint8_t* taglist,
+ size_t len);
+
+/**
+ * see if two taglists intersect (have tags in common).
+ * @param list1: first tag bitlist.
+ * @param list1len: length in bytes of first list.
+ * @param list2: second tag bitlist.
+ * @param list2len: length in bytes of second list.
+ * @return true if there are tags in common, 0 if not.
+ */
+int taglist_intersect(uint8_t* list1, size_t list1len, uint8_t* list2,
+ size_t list2len);
+
/**
* Parse local-zone directive into two strings and register it in the config.
* @param cfg: to put it in.
so-sndbuf{COLON} { YDVAR(1, VAR_SO_SNDBUF) }
so-reuseport{COLON} { YDVAR(1, VAR_SO_REUSEPORT) }
ip-transparent{COLON} { YDVAR(1, VAR_IP_TRANSPARENT) }
+ip-freebind{COLON} { YDVAR(1, VAR_IP_FREEBIND) }
chroot{COLON} { YDVAR(1, VAR_CHROOT) }
username{COLON} { YDVAR(1, VAR_USERNAME) }
directory{COLON} { YDVAR(1, VAR_DIRECTORY) }
max-udp-size{COLON} { YDVAR(1, VAR_MAX_UDP_SIZE) }
dns64-prefix{COLON} { YDVAR(1, VAR_DNS64_PREFIX) }
dns64-synthall{COLON} { YDVAR(1, VAR_DNS64_SYNTHALL) }
+define-tag{COLON} { YDVAR(1, VAR_DEFINE_TAG) }
+local-zone-tag{COLON} { YDVAR(2, VAR_LOCAL_ZONE_TAG) }
dnstap{COLON} { YDVAR(0, VAR_DNSTAP) }
dnstap-enable{COLON} { YDVAR(1, VAR_DNSTAP_ENABLE) }
dnstap-socket-path{COLON} { YDVAR(1, VAR_DNSTAP_SOCKET_PATH) }
YDVAR(1, VAR_DNSTAP_LOG_FORWARDER_QUERY_MESSAGES) }
dnstap-log-forwarder-response-messages{COLON} {
YDVAR(1, VAR_DNSTAP_LOG_FORWARDER_RESPONSE_MESSAGES) }
+disable-dnssec-lame-check{COLON} { YDVAR(1, VAR_DISABLE_DNSSEC_LAME_CHECK) }
ratelimit{COLON} { YDVAR(1, VAR_RATELIMIT) }
ratelimit-slabs{COLON} { YDVAR(1, VAR_RATELIMIT_SLABS) }
ratelimit-size{COLON} { YDVAR(1, VAR_RATELIMIT_SIZE) }
%token VAR_DNSTAP_LOG_FORWARDER_QUERY_MESSAGES
%token VAR_DNSTAP_LOG_FORWARDER_RESPONSE_MESSAGES
%token VAR_HARDEN_ALGO_DOWNGRADE VAR_IP_TRANSPARENT
+%token VAR_DISABLE_DNSSEC_LAME_CHECK
%token VAR_RATELIMIT VAR_RATELIMIT_SLABS VAR_RATELIMIT_SIZE
%token VAR_RATELIMIT_FOR_DOMAIN VAR_RATELIMIT_BELOW_DOMAIN VAR_RATELIMIT_FACTOR
%token VAR_CAPS_WHITELIST VAR_CACHE_MAX_NEGATIVE_TTL VAR_PERMIT_SMALL_HOLDDOWN
-%token VAR_QNAME_MINIMISATION
+%token VAR_QNAME_MINIMISATION VAR_IP_FREEBIND VAR_DEFINE_TAG VAR_LOCAL_ZONE_TAG
%%
toplevelvars: /* empty */ | toplevelvars toplevelvar ;
server_ratelimit_size | server_ratelimit_for_domain |
server_ratelimit_below_domain | server_ratelimit_factor |
server_caps_whitelist | server_cache_max_negative_ttl |
- server_permit_small_holddown | server_qname_minimisation
+ server_permit_small_holddown | server_qname_minimisation |
+ server_ip_freebind | server_define_tag | server_local_zone_tag |
+ server_disable_dnssec_lame_check
;
stubstart: VAR_STUB_ZONE
{
free($2);
}
;
+server_ip_freebind: VAR_IP_FREEBIND STRING_ARG
+ {
+ OUTYY(("P(server_ip_freebind:%s)\n", $2));
+ if(strcmp($2, "yes") != 0 && strcmp($2, "no") != 0)
+ yyerror("expected yes or no.");
+ else cfg_parser->cfg->ip_freebind =
+ (strcmp($2, "yes")==0);
+ free($2);
+ }
+ ;
server_edns_buffer_size: VAR_EDNS_BUFFER_SIZE STRING_ARG
{
OUTYY(("P(server_edns_buffer_size:%s)\n", $2));
free($2);
}
;
+server_define_tag: VAR_DEFINE_TAG STRING_ARG
+ {
+ char* p, *s = $2;
+ OUTYY(("P(server_define_tag:%s)\n", $2));
+ while((p=strsep(&s, " \t\n")) != NULL) {
+ if(*p) {
+ if(!config_add_tag(cfg_parser->cfg, p))
+ yyerror("could not define-tag, "
+ "out of memory");
+ }
+ }
+ free($2);
+ }
+ ;
+server_local_zone_tag: VAR_LOCAL_ZONE_TAG STRING_ARG STRING_ARG
+ {
+ size_t len = 0;
+ uint8_t* bitlist = config_parse_taglist(cfg_parser->cfg, $3,
+ &len);
+ free($3);
+ OUTYY(("P(server_local_zone_tag:%s)\n", $2));
+ if(!bitlist)
+ yyerror("could not parse tags, (define-tag them first)");
+ if(bitlist) {
+ if(!cfg_strbytelist_insert(
+ &cfg_parser->cfg->local_zone_tags,
+ $2, bitlist, len)) {
+ yyerror("out of memory");
+ free($2);
+ }
+ }
+ }
+ ;
server_ratelimit: VAR_RATELIMIT STRING_ARG
{
OUTYY(("P(server_ratelimit:%s)\n", $2));
free(cfg_parser->cfg->python_script);
cfg_parser->cfg->python_script = $2;
}
+server_disable_dnssec_lame_check: VAR_DISABLE_DNSSEC_LAME_CHECK STRING_ARG
+ {
+ OUTYY(("P(disable_dnssec_lame_check:%s)\n", $2));
+ if (strcmp($2, "yes") != 0 && strcmp($2, "no") != 0)
+ yyerror("expected yes or no.");
+ else cfg_parser->cfg->disable_dnssec_lame_check =
+ (strcmp($2, "yes")==0);
+ free($2);
+ }
%%
/* parse helper routines could be here */
log_assert(len1 == len2 && len1 != 0);
/* compare labels */
while(len1--) {
- if(tolower((unsigned char)*d1++) != tolower((unsigned char)*d2++)) {
- if(tolower((unsigned char)d1[-1]) < tolower((unsigned char)d2[-1]))
+ if(tolower((unsigned char)*d1) != tolower((unsigned char)*d2)) {
+ if(tolower((unsigned char)*d1) < tolower((unsigned char)*d2))
return -1;
return 1;
}
+ d1++;
+ d2++;
}
len1 = *d1++;
len2 = *d2++;
log_assert(lablen <= LDNS_MAX_LABELLEN);
labuf[0] = lablen;
i=0;
- while(lablen--)
- labuf[++i] = (uint8_t)tolower((unsigned char)*dname++);
+ while(lablen--) {
+ labuf[++i] = (uint8_t)tolower((unsigned char)*dname);
+ dname++;
+ }
h = hashlittle(labuf, labuf[0] + 1, h);
lablen = *dname++;
}
log_assert(lablen <= LDNS_MAX_LABELLEN);
labuf[0] = lablen;
i=0;
- while(lablen--)
- labuf[++i] = (uint8_t)tolower((unsigned char)*dname++);
+ while(lablen--) {
+ labuf[++i] = (uint8_t)tolower((unsigned char)*dname);
+ dname++;
+ }
h = hashlittle(labuf, labuf[0] + 1, h);
lablen = *dname++;
}
uint16_t
calc_edns_field_size(struct edns_data* edns)
{
+ size_t rdatalen = 0;
+ struct edns_option* opt;
if(!edns || !edns->edns_present)
return 0;
- /* domain root '.' + type + class + ttl + rdatalen(=0) */
- return 1 + 2 + 2 + 4 + 2;
+ for(opt = edns->opt_list; opt; opt = opt->next) {
+ rdatalen += 4 + opt->opt_len;
+ }
+ /* domain root '.' + type + class + ttl + rdatalen */
+ return 1 + 2 + 2 + 4 + 2 + rdatalen;
}
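As a check of the arithmetic: with a single option carrying 4 octets of data the OPT RR now occupies 1 + 2 + 2 + 4 + 2 + (4 + 4) = 19 octets, while an empty opt_list still yields the previous fixed size of 11 octets.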
void
attach_edns_record(sldns_buffer* pkt, struct edns_data* edns)
{
size_t len;
+ size_t rdatapos;
+ struct edns_option* opt;
if(!edns || !edns->edns_present)
return;
/* inc additional count */
sldns_buffer_write_u8(pkt, edns->ext_rcode); /* ttl */
sldns_buffer_write_u8(pkt, edns->edns_version);
sldns_buffer_write_u16(pkt, edns->bits);
+ rdatapos = sldns_buffer_position(pkt);
sldns_buffer_write_u16(pkt, 0); /* rdatalen */
+ /* write rdata */
+ for(opt=edns->opt_list; opt; opt=opt->next) {
+ sldns_buffer_write_u16(pkt, opt->opt_code);
+ sldns_buffer_write_u16(pkt, opt->opt_len);
+ if(opt->opt_len != 0)
+ sldns_buffer_write(pkt, opt->opt_data, opt->opt_len);
+ }
+ if(edns->opt_list)
+ sldns_buffer_write_u16_at(pkt, rdatapos,
+ sldns_buffer_position(pkt)-rdatapos-2);
sldns_buffer_flip(pkt);
}
*/
#include "config.h"
#include "util/data/msgparse.h"
+#include "util/data/msgreply.h"
#include "util/data/dname.h"
#include "util/data/packed_rrset.h"
#include "util/storage/lookup3.h"
return 0;
}
+/** parse EDNS options from EDNS wireformat rdata */
+static int
+parse_edns_options(uint8_t* rdata_ptr, size_t rdata_len,
+ struct edns_data* edns, struct regional* region)
+{
+ /* while still more options, and have code+len to read */
+ /* ignores partial content (i.e. rdata len 3) */
+ while(rdata_len >= 4) {
+ uint16_t opt_code = sldns_read_uint16(rdata_ptr);
+ uint16_t opt_len = sldns_read_uint16(rdata_ptr+2);
+ rdata_ptr += 4;
+ rdata_len -= 4;
+ if(opt_len > rdata_len)
+ break; /* option code partial */
+ if(!edns_opt_append(edns, region, opt_code, opt_len,
+ rdata_ptr)) {
+ log_err("out of memory");
+ return 0;
+ }
+ rdata_ptr += opt_len;
+ rdata_len -= opt_len;
+ }
+ return 1;
+}
+
int
-parse_extract_edns(struct msg_parse* msg, struct edns_data* edns)
+parse_extract_edns(struct msg_parse* msg, struct edns_data* edns,
+ struct regional* region)
{
struct rrset_parse* rrset = msg->rrset_first;
struct rrset_parse* prev = 0;
struct rrset_parse* found = 0;
struct rrset_parse* found_prev = 0;
+ size_t rdata_len;
+ uint8_t* rdata_ptr;
/* since the class encodes the UDP size, we cannot use hash table to
* find the EDNS OPT record. Scan the packet. */
while(rrset) {
edns->edns_version = found->rr_last->ttl_data[1];
edns->bits = sldns_read_uint16(&found->rr_last->ttl_data[2]);
edns->udp_size = ntohs(found->rrset_class);
- /* ignore rdata and rrsigs */
+ edns->opt_list = NULL;
+
+ /* take the options */
+ rdata_len = found->rr_first->size;
+ rdata_ptr = found->rr_first->ttl_data+6;
+ if(!parse_edns_options(rdata_ptr, rdata_len, edns, region))
+ return 0;
+
+ /* ignore rrsigs */
+
return 0;
}
int
-parse_edns_from_pkt(sldns_buffer* pkt, struct edns_data* edns)
+parse_edns_from_pkt(sldns_buffer* pkt, struct edns_data* edns,
+ struct regional* region)
{
+ size_t rdata_len;
+ uint8_t* rdata_ptr;
log_assert(LDNS_QDCOUNT(sldns_buffer_begin(pkt)) == 1);
log_assert(LDNS_ANCOUNT(sldns_buffer_begin(pkt)) == 0);
log_assert(LDNS_NSCOUNT(sldns_buffer_begin(pkt)) == 0);
edns->ext_rcode = sldns_buffer_read_u8(pkt); /* ttl used for bits */
edns->edns_version = sldns_buffer_read_u8(pkt);
edns->bits = sldns_buffer_read_u16(pkt);
- /* ignore rdata and rrsigs */
+ edns->opt_list = NULL;
+
+ /* take the options */
+ rdata_len = sldns_buffer_read_u16(pkt);
+ if(sldns_buffer_remaining(pkt) < rdata_len)
+ return LDNS_RCODE_FORMERR;
+ rdata_ptr = sldns_buffer_current(pkt);
+ if(!parse_edns_options(rdata_ptr, rdata_len, edns, region))
+ return LDNS_RCODE_SERVFAIL;
+
+ /* ignore rrsigs */
+
return 0;
}
struct rrset_parse;
struct rr_parse;
struct regional;
+struct edns_option;
/** number of buckets in parse rrset hash table. Must be power of 2. */
#define PARSE_TABLE_SIZE 32
/**
* EDNS data storage
- * EDNS rdata is ignored.
+ * The rdata is parsed into a list (with accessor functions), allocated in
+ * a region.
*/
struct edns_data {
/** if EDNS OPT record was present */
uint16_t bits;
/** UDP reassembly size. */
uint16_t udp_size;
+ /** rdata element list, or NULL if none */
+ struct edns_option* opt_list;
+};
+
+/**
+ * EDNS option
+ */
+struct edns_option {
+ /** next item in list */
+ struct edns_option* next;
+ /** type of this edns option */
+ uint16_t opt_code;
+ /** length of this edns option (cannot exceed uint16 in encoding) */
+ size_t opt_len;
+ /** data of this edns option; allocated in region, or NULL if len=0 */
+ uint8_t* opt_data;
};
/**
* @param msg: parsed message structure. Modified on exit, if EDNS was present
* it is removed from the additional section.
* @param edns: the edns data is stored here. Does not have to be initialised.
+ * @param region: region to alloc results in (edns option contents)
* @return: 0 on success. or an RCODE on an error.
* RCODE formerr if OPT in wrong section, and so on.
*/
-int parse_extract_edns(struct msg_parse* msg, struct edns_data* edns);
+int parse_extract_edns(struct msg_parse* msg, struct edns_data* edns,
+ struct regional* region);
/**
* If EDNS data follows a query section, extract it and initialize edns struct.
* section. At end, right after EDNS data or no movement if failed.
* @param edns: the edns data allocated by the caller. Does not have to be
* initialised.
+ * @param region: region to alloc results in (edns option contents)
* @return: 0 on success, or an RCODE on error.
* RCODE formerr if OPT is badly formatted and so on.
*/
-int parse_edns_from_pkt(struct sldns_buffer* pkt, struct edns_data* edns);
+int parse_edns_from_pkt(struct sldns_buffer* pkt, struct edns_data* edns,
+ struct regional* region);
/**
* Calculate hash value for rrset in packet.
if((ret = parse_packet(pkt, msg, region)) != 0) {
return ret;
}
- if((ret = parse_extract_edns(msg, edns)) != 0)
+ if((ret = parse_extract_edns(msg, edns, region)) != 0)
return ret;
/* parse OK, allocate return structures */
}
return 1;
}
+
+int edns_opt_append(struct edns_data* edns, struct regional* region,
+ uint16_t code, size_t len, uint8_t* data)
+{
+ struct edns_option** prevp;
+ struct edns_option* opt;
+
+ /* allocate new element */
+ opt = (struct edns_option*)regional_alloc(region, sizeof(*opt));
+ if(!opt)
+ return 0;
+ opt->next = NULL;
+ opt->opt_code = code;
+ opt->opt_len = len;
+ opt->opt_data = regional_alloc_init(region, data, len);
+ if(!opt->opt_data)
+ return 0;
+
+ /* append at end of list */
+ prevp = &edns->opt_list;
+ while(*prevp != NULL)
+ prevp = &((*prevp)->next);
+ *prevp = opt;
+ return 1;
+}
+
+int edns_opt_inplace_reply(struct edns_data* edns, struct regional* region)
+{
+ (void)region;
+ /* remove all edns options from the reply, because only the
+ * options that we understand should be in the reply
+ * (sec 6.1.2 RFC 6891) */
+ edns->opt_list = NULL;
+ return 1;
+}
+
+struct edns_option* edns_opt_copy_region(struct edns_option* list,
+ struct regional* region)
+{
+ struct edns_option* result = NULL, *cur = NULL, *s;
+ while(list) {
+ /* copy edns option structure */
+ s = regional_alloc_init(region, list, sizeof(*list));
+ if(!s) return NULL;
+ s->next = NULL;
+
+ /* copy option data */
+ if(s->opt_data) {
+ s->opt_data = regional_alloc_init(region, s->opt_data,
+ s->opt_len);
+ if(!s->opt_data)
+ return NULL;
+ }
+
+ /* link into list */
+ if(cur)
+ cur->next = s;
+ else result = s;
+ cur = s;
+
+ /* examine next element */
+ list = list->next;
+ }
+ return result;
+}
+
+int edns_opt_compare(struct edns_option* p, struct edns_option* q)
+{
+ if(!p && !q) return 0;
+ if(!p) return -1;
+ if(!q) return 1;
+ log_assert(p && q);
+ if(p->opt_code != q->opt_code)
+ return (int)q->opt_code - (int)p->opt_code;
+ if(p->opt_len != q->opt_len)
+ return (int)q->opt_len - (int)p->opt_len;
+ if(p->opt_len != 0)
+ return memcmp(p->opt_data, q->opt_data, p->opt_len);
+ return 0;
+}
+
+int edns_opt_list_compare(struct edns_option* p, struct edns_option* q)
+{
+ int r;
+ while(p && q) {
+ r = edns_opt_compare(p, q);
+ if(r != 0)
+ return r;
+ p = p->next;
+ q = q->next;
+ }
+ if(p || q) {
+ /* uneven length lists */
+ if(p) return 1;
+ if(q) return -1;
+ }
+ return 0;
+}
+
+void edns_opt_list_free(struct edns_option* list)
+{
+ struct edns_option* n;
+ while(list) {
+ free(list->opt_data);
+ n = list->next;
+ free(list);
+ list = n;
+ }
+}
+
+struct edns_option* edns_opt_copy_alloc(struct edns_option* list)
+{
+ struct edns_option* result = NULL, *cur = NULL, *s;
+ while(list) {
+ /* copy edns option structure */
+ s = memdup(list, sizeof(*list));
+ if(!s) {
+ edns_opt_list_free(result);
+ return NULL;
+ }
+ s->next = NULL;
+
+ /* copy option data */
+ if(s->opt_data) {
+ s->opt_data = memdup(s->opt_data, s->opt_len);
+ if(!s->opt_data) {
+ edns_opt_list_free(result);
+ return NULL;
+ }
+ }
+
+ /* link into list */
+ if(cur)
+ cur->next = s;
+ else result = s;
+ cur = s;
+
+ /* examine next element */
+ list = list->next;
+ }
+ return result;
+}
+
+struct edns_option* edns_opt_find(struct edns_option* list, uint16_t code)
+{
+ struct edns_option* p;
+ for(p=list; p; p=p->next) {
+ if(p->opt_code == code)
+ return p;
+ }
+ return NULL;
+}
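As a usage sketch of the new helpers (not part of the patch; the scratch regional and the calling context are assumed), appending a padding option (code 12, registered above) to an outgoing edns and finding it again could look like:

#include <stdint.h>
#include "util/regional.h"
#include "util/data/msgparse.h"   /* struct edns_data, struct edns_option */
#include "util/data/msgreply.h"   /* edns_opt_append, edns_opt_find */

/* Illustrative only: attach a 4-octet padding option and verify that it
 * ended up in the option list. */
static int add_padding_option(struct edns_data* edns, struct regional* region)
{
	uint8_t zeroes[4] = {0, 0, 0, 0};
	struct edns_option* found;
	if(!edns_opt_append(edns, region, 12 /* LDNS_EDNS_PADDING */,
		sizeof(zeroes), zeroes))
		return 0; /* out of memory in the region */
	found = edns_opt_find(edns->opt_list, 12);
	return found != NULL && found->opt_len == sizeof(zeroes);
}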
void log_query_info(enum verbosity_value v, const char* str,
struct query_info* qinf);
+/**
+ * Append edns option to edns data structure
+ */
+int edns_opt_append(struct edns_data* edns, struct regional* region,
+ uint16_t code, size_t len, uint8_t* data);
+
+/**
+ * Find edns option in edns list
+ * @param list: list of edns options (eg. edns.opt_list)
+ * @param code: opt code to find.
+ * @return NULL or the edns_option element.
+ */
+struct edns_option* edns_opt_find(struct edns_option* list, uint16_t code);
+
+/**
+ * Transform edns data structure from query structure into reply structure.
+ * In place transform, for errors and cache replies.
+ * @param edns: on input contains the edns from the query. On output contains
+ * the edns for the answer. Add new options to the opt_list to put them
+ * in the answer (allocated in the region, with edns_opt_append).
+ * @param region: to allocate stuff in.
+ * @return false on failure (servfail to client, or for some error encodings,
+ * no EDNS options in the answer).
+ */
+int edns_opt_inplace_reply(struct edns_data* edns, struct regional* region);
+
+/**
+ * Copy an edns option list; the copy is allocated in the new region.
+ */
+struct edns_option* edns_opt_copy_region(struct edns_option* list,
+ struct regional* region);
+
+/**
+ * Copy edns option list allocated with malloc
+ */
+struct edns_option* edns_opt_copy_alloc(struct edns_option* list);
+
+/**
+ * Free edns option list allocated with malloc
+ */
+void edns_opt_list_free(struct edns_option* list);
+
+/**
+ * Compare a single edns option (not the entire list); also compares contents.
+ */
+int edns_opt_compare(struct edns_option* p, struct edns_option* q);
+
+/**
+ * Compare edns option lists, also the order and contents of edns-options.
+ */
+int edns_opt_list_compare(struct edns_option* p, struct edns_option* q);
+
#endif /* UTIL_DATA_MSGREPLY_H */
#ifdef WITH_PYTHONMODULE
#include "pythonmod/pythonmod.h"
#endif
+#ifdef USE_CACHEDB
+#include "cachedb/cachedb.h"
+#endif
int
fptr_whitelist_comm_point(comm_point_callback_t *fptr)
fptr_whitelist_modenv_send_query(struct outbound_entry* (*fptr)(
uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
uint16_t flags, int dnssec, int want_dnssec, int nocaps,
- struct sockaddr_storage* addr, socklen_t addrlen,
- uint8_t* zone, size_t zonelen,
+ struct edns_option* opt_list, struct sockaddr_storage* addr,
+ socklen_t addrlen, uint8_t* zone, size_t zonelen,
struct module_qstate* q))
{
if(fptr == &worker_send_query) return 1;
else if(fptr == &dns64_init) return 1;
#ifdef WITH_PYTHONMODULE
else if(fptr == &pythonmod_init) return 1;
+#endif
+#ifdef USE_CACHEDB
+ else if(fptr == &cachedb_init) return 1;
#endif
return 0;
}
else if(fptr == &dns64_deinit) return 1;
#ifdef WITH_PYTHONMODULE
else if(fptr == &pythonmod_deinit) return 1;
+#endif
+#ifdef USE_CACHEDB
+ else if(fptr == &cachedb_deinit) return 1;
#endif
return 0;
}
else if(fptr == &dns64_operate) return 1;
#ifdef WITH_PYTHONMODULE
else if(fptr == &pythonmod_operate) return 1;
+#endif
+#ifdef USE_CACHEDB
+ else if(fptr == &cachedb_operate) return 1;
#endif
return 0;
}
else if(fptr == &dns64_inform_super) return 1;
#ifdef WITH_PYTHONMODULE
else if(fptr == &pythonmod_inform_super) return 1;
+#endif
+#ifdef USE_CACHEDB
+ else if(fptr == &cachedb_inform_super) return 1;
#endif
return 0;
}
else if(fptr == &dns64_clear) return 1;
#ifdef WITH_PYTHONMODULE
else if(fptr == &pythonmod_clear) return 1;
+#endif
+#ifdef USE_CACHEDB
+ else if(fptr == &cachedb_clear) return 1;
#endif
return 0;
}
else if(fptr == &dns64_get_mem) return 1;
#ifdef WITH_PYTHONMODULE
else if(fptr == &pythonmod_get_mem) return 1;
+#endif
+#ifdef USE_CACHEDB
+ else if(fptr == &cachedb_get_mem) return 1;
#endif
return 0;
}
int fptr_whitelist_modenv_send_query(struct outbound_entry* (*fptr)(
uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
uint16_t flags, int dnssec, int want_dnssec, int nocaps,
- struct sockaddr_storage* addr, socklen_t addrlen,
+ struct edns_option*, struct sockaddr_storage* addr, socklen_t addrlen,
uint8_t* zone, size_t zonelen,
struct module_qstate* q));
4412,
4413,
4416,
+4418,
+4420,
4425,
4426,
4430,
4599,
4600,
4601,
+4621,
4658,
4659,
4660,
5436,
5437,
5443,
+5450,
5453,
5454,
5455,
7201,
7227,
7235,
+7244,
7262,
7272,
7273,
7570,
7574,
7588,
+7606,
7624,
7627,
7628,
23004,
23005,
23272,
+23294,
23333,
23400,
23401,
* EDNS, the answer is likely to be useless for this domain.
* @param nocaps: do not use caps_for_id, use the qname as given.
* (ignored if caps_for_id is disabled).
+ * @param opt_list: set these EDNS options on the outgoing packet,
+ * or NULL if none (the list is deep-copied).
* @param addr: where to.
* @param addrlen: length of addr.
* @param zone: delegation point name.
*/
struct outbound_entry* (*send_query)(uint8_t* qname, size_t qnamelen,
uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
- int want_dnssec, int nocaps, struct sockaddr_storage* addr,
- socklen_t addrlen, uint8_t* zone, size_t zonelen,
- struct module_qstate* q);
+ int want_dnssec, int nocaps, struct edns_option* opt_list,
+ struct sockaddr_storage* addr, socklen_t addrlen,
+ uint8_t* zone, size_t zonelen, struct module_qstate* q);
/**
* Detach-subqueries.
#endif
}
-#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED)
+#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED) && defined(CRYPTO_LOCK)
/** global lock list for openssl locks */
static lock_basic_t *ub_openssl_locks = NULL;
int ub_openssl_lock_init(void)
{
-#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED)
+#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED) && defined(CRYPTO_LOCK)
int i;
ub_openssl_locks = (lock_basic_t*)reallocarray(
NULL, (size_t)CRYPTO_num_locks(), sizeof(lock_basic_t));
void ub_openssl_lock_delete(void)
{
-#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED)
+#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED) && defined(CRYPTO_LOCK)
int i;
if(!ub_openssl_locks)
return;
*/
#include "config.h"
#include "util/netevent.h"
+#include "util/ub_event.h"
#include "util/log.h"
#include "util/net_help.h"
#include "util/fptr_wlist.h"
#define NUM_UDP_PER_SELECT 1
#endif
-/* We define libevent structures here to hide the libevent stuff. */
-
-#ifdef USE_MINI_EVENT
-# ifdef USE_WINSOCK
-# include "util/winsock_event.h"
-# else
-# include "util/mini_event.h"
-# endif /* USE_WINSOCK */
-#else /* USE_MINI_EVENT */
- /* we use libevent */
-# ifdef HAVE_EVENT_H
-# include <event.h>
-# else
-# include "event2/event.h"
-# include "event2/event_struct.h"
-# include "event2/event_compat.h"
-# endif
-#endif /* USE_MINI_EVENT */
-
/**
- * The internal event structure for keeping libevent info for the event.
+ * The internal event structure for keeping ub_event info for the event.
* Possibly other structures (list, tree) this is part of.
*/
struct internal_event {
/** the comm base */
struct comm_base* base;
- /** libevent event type, alloced here */
- struct event ev;
+ /** ub_event event type */
+ struct ub_event* ev;
};
/**
* Internal base structure, so that every thread has its own events.
*/
struct internal_base {
- /** libevent event_base type. */
- struct event_base* base;
+ /** ub_event event_base type. */
+ struct ub_event_base* base;
/** seconds time pointer points here */
time_t secs;
/** timeval with current time */
struct timeval now;
/** the event used for slow_accept timeouts */
- struct event slow_accept;
+ struct ub_event* slow_accept;
/** true if slow_accept is enabled */
int slow_accept_enabled;
};
* Internal timer structure, to store timer event in.
*/
struct internal_timer {
+ /** the super struct from which derived */
+ struct comm_timer super;
/** the comm base */
struct comm_base* base;
- /** libevent event type, alloced here */
- struct event ev;
+ /** ub_event event type */
+ struct ub_event* ev;
/** is timer enabled */
uint8_t enabled;
};
* Internal signal structure, to store signal event in.
*/
struct internal_signal {
- /** libevent event type, alloced here */
- struct event ev;
+ /** ub_event event type */
+ struct ub_event* ev;
/** next in signal list */
struct internal_signal* next;
};
/* -------- End of local definitions -------- */
-#ifdef USE_MINI_EVENT
-/** minievent updates the time when it blocks. */
-#define comm_base_now(x) /* nothing to do */
-#else /* !USE_MINI_EVENT */
-/** fillup the time values in the event base */
-static void
-comm_base_now(struct comm_base* b)
-{
- if(gettimeofday(&b->eb->now, NULL) < 0) {
- log_err("gettimeofday: %s", strerror(errno));
- }
- b->eb->secs = (time_t)b->eb->now.tv_sec;
-}
-#endif /* USE_MINI_EVENT */
-
struct comm_base*
comm_base_create(int sigs)
{
struct comm_base* b = (struct comm_base*)calloc(1,
sizeof(struct comm_base));
+ const char *evnm="event", *evsys="", *evmethod="";
+
if(!b)
return NULL;
b->eb = (struct internal_base*)calloc(1, sizeof(struct internal_base));
free(b);
return NULL;
}
-#ifdef USE_MINI_EVENT
- (void)sigs;
- /* use mini event time-sharing feature */
- b->eb->base = event_init(&b->eb->secs, &b->eb->now);
-#else
-# if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
- /* libev */
- if(sigs)
- b->eb->base=(struct event_base *)ev_default_loop(EVFLAG_AUTO);
- else
- b->eb->base=(struct event_base *)ev_loop_new(EVFLAG_AUTO);
-# else
- (void)sigs;
-# ifdef HAVE_EVENT_BASE_NEW
- b->eb->base = event_base_new();
-# else
- b->eb->base = event_init();
-# endif
-# endif
-#endif
+ b->eb->base = ub_default_event_base(sigs, &b->eb->secs, &b->eb->now);
if(!b->eb->base) {
free(b->eb);
free(b);
return NULL;
}
- comm_base_now(b);
- /* avoid event_get_method call which causes crashes even when
- * not printing, because its result is passed */
- verbose(VERB_ALGO,
-#if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
- "libev"
-#elif defined(USE_MINI_EVENT)
- "event "
-#else
- "libevent "
-#endif
- "%s uses %s method.",
- event_get_version(),
-#ifdef HAVE_EVENT_BASE_GET_METHOD
- event_base_get_method(b->eb->base)
-#else
- "not_obtainable"
-#endif
- );
+ ub_comm_base_now(b);
+ ub_get_event_sys(b->eb->base, &evnm, &evsys, &evmethod);
+	verbose(VERB_ALGO, "%s %s uses %s method.", evnm, evsys, evmethod);
return b;
}
struct comm_base*
-comm_base_create_event(struct event_base* base)
+comm_base_create_event(struct ub_event_base* base)
{
struct comm_base* b = (struct comm_base*)calloc(1,
sizeof(struct comm_base));
return NULL;
}
b->eb->base = base;
- comm_base_now(b);
+ ub_comm_base_now(b);
return b;
}
if(!b)
return;
if(b->eb->slow_accept_enabled) {
- if(event_del(&b->eb->slow_accept) != 0) {
+ if(ub_event_del(b->eb->slow_accept) != 0) {
log_err("could not event_del slow_accept");
}
+ ub_event_free(b->eb->slow_accept);
}
-#ifdef USE_MINI_EVENT
- event_base_free(b->eb->base);
-#elif defined(HAVE_EVENT_BASE_FREE) && defined(HAVE_EVENT_BASE_ONCE)
- /* only libevent 1.2+ has it, but in 1.2 it is broken -
- assertion fails on signal handling ev that is not deleted
- in libevent 1.3c (event_base_once appears) this is fixed. */
- event_base_free(b->eb->base);
-#endif /* HAVE_EVENT_BASE_FREE and HAVE_EVENT_BASE_ONCE */
+ ub_event_base_free(b->eb->base);
b->eb->base = NULL;
free(b->eb);
free(b);
if(!b)
return;
if(b->eb->slow_accept_enabled) {
- if(event_del(&b->eb->slow_accept) != 0) {
+ if(ub_event_del(b->eb->slow_accept) != 0) {
log_err("could not event_del slow_accept");
}
+ ub_event_free(b->eb->slow_accept);
}
b->eb->base = NULL;
free(b->eb);
comm_base_dispatch(struct comm_base* b)
{
int retval;
- retval = event_base_dispatch(b->eb->base);
- if(retval != 0) {
+ retval = ub_event_base_dispatch(b->eb->base);
+ if(retval < 0) {
fatal_exit("event_dispatch returned error %d, "
"errno is %s", retval, strerror(errno));
}
void comm_base_exit(struct comm_base* b)
{
- if(event_base_loopexit(b->eb->base, NULL) != 0) {
+ if(ub_event_base_loopexit(b->eb->base) != 0) {
log_err("Could not loopexit");
}
}
b->cb_arg = arg;
}
-struct event_base* comm_base_internal(struct comm_base* b)
+struct ub_event_base* comm_base_internal(struct comm_base* b)
{
return b->eb->base;
}
rep.c = (struct comm_point*)arg;
log_assert(rep.c->type == comm_udp);
- if(!(event&EV_READ))
+ if(!(event&UB_EV_READ))
return;
log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
- comm_base_now(rep.c->ev->base);
+ ub_comm_base_now(rep.c->ev->base);
for(i=0; i<NUM_UDP_PER_SELECT; i++) {
sldns_buffer_clear(rep.c->buffer);
rep.addrlen = (socklen_t)sizeof(rep.addr);
rep.c = (struct comm_point*)arg;
log_assert(rep.c->type == comm_udp);
- if(!(event&EV_READ))
+ if(!(event&UB_EV_READ))
return;
log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
- comm_base_now(rep.c->ev->base);
+ ub_comm_base_now(rep.c->ev->base);
for(i=0; i<NUM_UDP_PER_SELECT; i++) {
sldns_buffer_clear(rep.c->buffer);
rep.addrlen = (socklen_t)sizeof(rep.addr);
/* set timeout, no mallocs */
tv.tv_sec = NETEVENT_SLOW_ACCEPT_TIME/1000;
tv.tv_usec = NETEVENT_SLOW_ACCEPT_TIME%1000;
- event_set(&b->eb->slow_accept, -1, EV_TIMEOUT,
+ b->eb->slow_accept = ub_event_new(b->eb->base,
+ -1, UB_EV_TIMEOUT,
comm_base_handle_slow_accept, b);
- if(event_base_set(b->eb->base,
- &b->eb->slow_accept) != 0) {
+ if(b->eb->slow_accept == NULL) {
/* we do not want to log here, because
* that would spam the logfiles.
* error: "event_base_set failed." */
}
- if(event_add(&b->eb->slow_accept, &tv) != 0) {
+ else if(ub_event_add(b->eb->slow_accept, &tv)
+ != 0) {
/* we do not want to log here,
* error: "event_add failed." */
}
WSAGetLastError() == WSAECONNRESET)
return -1;
if(WSAGetLastError() == WSAEWOULDBLOCK) {
- winsock_tcp_wouldblock(&c->ev->ev, EV_READ);
+ ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_READ);
return -1;
}
log_err_addr("accept failed", wsa_strerror(WSAGetLastError()),
if( (oper == (BIO_CB_READ|BIO_CB_RETURN) && argl == 0) ||
(oper == (BIO_CB_GETS|BIO_CB_RETURN) && argl == 0)) {
if(WSAGetLastError() == WSAEWOULDBLOCK)
- winsock_tcp_wouldblock((struct event*)
- BIO_get_callback_arg(b), EV_READ);
+ ub_winsock_tcp_wouldblock((struct ub_event*)
+ BIO_get_callback_arg(b), UB_EV_READ);
}
if( (oper == (BIO_CB_WRITE|BIO_CB_RETURN) && argl == 0) ||
(oper == (BIO_CB_PUTS|BIO_CB_RETURN) && argl == 0)) {
if(WSAGetLastError() == WSAEWOULDBLOCK)
- winsock_tcp_wouldblock((struct event*)
- BIO_get_callback_arg(b), EV_WRITE);
+ ub_winsock_tcp_wouldblock((struct ub_event*)
+ BIO_get_callback_arg(b), UB_EV_WRITE);
}
/* return original return value */
return retvalue;
SSL* ssl = (SSL*)thessl;
/* set them both just in case, but usually they are the same BIO */
BIO_set_callback(SSL_get_rbio(ssl), &win_bio_cb);
- BIO_set_callback_arg(SSL_get_rbio(ssl), (char*)&c->ev->ev);
+ BIO_set_callback_arg(SSL_get_rbio(ssl), (char*)c->ev->ev);
BIO_set_callback(SSL_get_wbio(ssl), &win_bio_cb);
- BIO_set_callback_arg(SSL_get_wbio(ssl), (char*)&c->ev->ev);
+ BIO_set_callback_arg(SSL_get_wbio(ssl), (char*)c->ev->ev);
}
#endif
struct comm_point* c = (struct comm_point*)arg, *c_hdl;
int new_fd;
log_assert(c->type == comm_tcp_accept);
- if(!(event & EV_READ)) {
+ if(!(event & UB_EV_READ)) {
log_info("ignoring tcp accept event %d", (int)event);
return;
}
- comm_base_now(c->ev->base);
+ ub_comm_base_now(c->ev->base);
/* find free tcp handler. */
if(!c->tcp_free) {
log_warn("accepted too many tcp, connections full");
if(WSAGetLastError() == WSAEINPROGRESS)
return 1;
if(WSAGetLastError() == WSAEWOULDBLOCK) {
- winsock_tcp_wouldblock(&c->ev->ev, EV_READ);
+ ub_winsock_tcp_wouldblock(c->ev->ev,
+ UB_EV_READ);
return 1;
}
log_err_addr("read (in tcp s)",
if(WSAGetLastError() == WSAEINPROGRESS)
return 1;
if(WSAGetLastError() == WSAEWOULDBLOCK) {
- winsock_tcp_wouldblock(&c->ev->ev, EV_READ);
+ ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_READ);
return 1;
}
log_err_addr("read (in tcp r)",
if(error == WSAEINPROGRESS)
return 1;
else if(error == WSAEWOULDBLOCK) {
- winsock_tcp_wouldblock(&c->ev->ev, EV_WRITE);
+ ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_WRITE);
return 1;
} else if(error != 0 && verbosity < 2)
return 0;
if(WSAGetLastError() == WSAEINPROGRESS)
return 1;
if(WSAGetLastError() == WSAEWOULDBLOCK) {
- winsock_tcp_wouldblock(&c->ev->ev, EV_WRITE);
+ ub_winsock_tcp_wouldblock(c->ev->ev,
+ UB_EV_WRITE);
return 1;
}
log_err_addr("tcp send s",
if(WSAGetLastError() == WSAEINPROGRESS)
return 1;
if(WSAGetLastError() == WSAEWOULDBLOCK) {
- winsock_tcp_wouldblock(&c->ev->ev, EV_WRITE);
+ ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_WRITE);
return 1;
}
log_err_addr("tcp send r", wsa_strerror(WSAGetLastError()),
{
struct comm_point* c = (struct comm_point*)arg;
log_assert(c->type == comm_tcp);
- comm_base_now(c->ev->base);
+ ub_comm_base_now(c->ev->base);
- if(event&EV_READ) {
+ if(event&UB_EV_READ) {
if(!comm_point_tcp_handle_read(fd, c, 0)) {
reclaim_tcp_handler(c);
if(!c->tcp_do_close) {
}
return;
}
- if(event&EV_WRITE) {
+ if(event&UB_EV_WRITE) {
if(!comm_point_tcp_handle_write(fd, c)) {
reclaim_tcp_handler(c);
if(!c->tcp_do_close) {
}
return;
}
- if(event&EV_TIMEOUT) {
+ if(event&UB_EV_TIMEOUT) {
verbose(VERB_QUERY, "tcp took too long, dropped");
reclaim_tcp_handler(c);
if(!c->tcp_do_close) {
{
struct comm_point* c = (struct comm_point*)arg;
log_assert(c->type == comm_local);
- comm_base_now(c->ev->base);
+ ub_comm_base_now(c->ev->base);
- if(event&EV_READ) {
+ if(event&UB_EV_READ) {
if(!comm_point_tcp_handle_read(fd, c, 1)) {
fptr_ok(fptr_whitelist_comm_point(c->callback));
(void)(*c->callback)(c, c->cb_arg, NETEVENT_CLOSED,
struct comm_point* c = (struct comm_point*)arg;
int err = NETEVENT_NOERROR;
log_assert(c->type == comm_raw);
- comm_base_now(c->ev->base);
+ ub_comm_base_now(c->ev->base);
- if(event&EV_TIMEOUT)
+ if(event&UB_EV_TIMEOUT)
err = NETEVENT_TIMEOUT;
fptr_ok(fptr_whitelist_comm_point_raw(c->callback));
(void)(*c->callback)(c, c->cb_arg, err, NULL);
c->inuse = 0;
c->callback = callback;
c->cb_arg = callback_arg;
- evbits = EV_READ | EV_PERSIST;
- /* libevent stuff */
- event_set(&c->ev->ev, c->fd, evbits, comm_point_udp_callback, c);
- if(event_base_set(base->eb->base, &c->ev->ev) != 0) {
+ evbits = UB_EV_READ | UB_EV_PERSIST;
+ /* ub_event stuff */
+ c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
+ comm_point_udp_callback, c);
+ if(c->ev->ev == NULL) {
log_err("could not baseset udp event");
comm_point_delete(c);
return NULL;
}
- if(fd!=-1 && event_add(&c->ev->ev, c->timeout) != 0 ) {
+ if(fd!=-1 && ub_event_add(c->ev->ev, c->timeout) != 0 ) {
log_err("could not add udp event");
comm_point_delete(c);
return NULL;
c->tcp_check_nb_connect = 0;
c->callback = callback;
c->cb_arg = callback_arg;
- evbits = EV_READ | EV_PERSIST;
- /* libevent stuff */
- event_set(&c->ev->ev, c->fd, evbits, comm_point_udp_ancil_callback, c);
- if(event_base_set(base->eb->base, &c->ev->ev) != 0) {
+ evbits = UB_EV_READ | UB_EV_PERSIST;
+ /* ub_event stuff */
+ c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
+ comm_point_udp_ancil_callback, c);
+ if(c->ev->ev == NULL) {
log_err("could not baseset udp event");
comm_point_delete(c);
return NULL;
}
- if(fd!=-1 && event_add(&c->ev->ev, c->timeout) != 0 ) {
+ if(fd!=-1 && ub_event_add(c->ev->ev, c->timeout) != 0 ) {
log_err("could not add udp event");
comm_point_delete(c);
return NULL;
/* add to parent free list */
c->tcp_free = parent->tcp_free;
parent->tcp_free = c;
- /* libevent stuff */
- evbits = EV_PERSIST | EV_READ | EV_TIMEOUT;
- event_set(&c->ev->ev, c->fd, evbits, comm_point_tcp_handle_callback, c);
- if(event_base_set(base->eb->base, &c->ev->ev) != 0)
+ /* ub_event stuff */
+ evbits = UB_EV_PERSIST | UB_EV_READ | UB_EV_TIMEOUT;
+ c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
+ comm_point_tcp_handle_callback, c);
+ if(c->ev->ev == NULL)
{
log_err("could not basetset tcphdl event");
parent->tcp_free = c->tcp_free;
c->tcp_check_nb_connect = 0;
c->callback = NULL;
c->cb_arg = NULL;
- evbits = EV_READ | EV_PERSIST;
- /* libevent stuff */
- event_set(&c->ev->ev, c->fd, evbits, comm_point_tcp_accept_callback, c);
- if(event_base_set(base->eb->base, &c->ev->ev) != 0 ||
- event_add(&c->ev->ev, c->timeout) != 0 )
- {
+ evbits = UB_EV_READ | UB_EV_PERSIST;
+ /* ub_event stuff */
+ c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
+ comm_point_tcp_accept_callback, c);
+ if(c->ev->ev == NULL) {
+ log_err("could not baseset tcpacc event");
+ comm_point_delete(c);
+ return NULL;
+ }
+ if (ub_event_add(c->ev->ev, c->timeout) != 0) {
log_err("could not add tcpacc event");
comm_point_delete(c);
return NULL;
}
-
/* now prealloc the tcp handlers */
for(i=0; i<num; i++) {
c->tcp_handlers[i] = comm_point_create_tcp_handler(base,
c->repinfo.c = c;
c->callback = callback;
c->cb_arg = callback_arg;
- evbits = EV_PERSIST | EV_WRITE;
- event_set(&c->ev->ev, c->fd, evbits, comm_point_tcp_handle_callback, c);
- if(event_base_set(base->eb->base, &c->ev->ev) != 0)
+ evbits = UB_EV_PERSIST | UB_EV_WRITE;
+ c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
+ comm_point_tcp_handle_callback, c);
+ if(c->ev->ev == NULL)
{
- log_err("could not basetset tcpout event");
+ log_err("could not baseset tcpout event");
sldns_buffer_free(c->buffer);
free(c->ev);
free(c);
c->tcp_check_nb_connect = 0;
c->callback = callback;
c->cb_arg = callback_arg;
- /* libevent stuff */
- evbits = EV_PERSIST | EV_READ;
- event_set(&c->ev->ev, c->fd, evbits, comm_point_local_handle_callback,
- c);
- if(event_base_set(base->eb->base, &c->ev->ev) != 0 ||
- event_add(&c->ev->ev, c->timeout) != 0 )
- {
+ /* ub_event stuff */
+ evbits = UB_EV_PERSIST | UB_EV_READ;
+ c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
+ comm_point_local_handle_callback, c);
+ if(c->ev->ev == NULL) {
+ log_err("could not baseset localhdl event");
+ free(c->ev);
+ free(c);
+ return NULL;
+ }
+ if (ub_event_add(c->ev->ev, c->timeout) != 0) {
log_err("could not add localhdl event");
+ ub_event_free(c->ev->ev);
free(c->ev);
free(c);
return NULL;
c->tcp_check_nb_connect = 0;
c->callback = callback;
c->cb_arg = callback_arg;
- /* libevent stuff */
+ /* ub_event stuff */
if(writing)
- evbits = EV_PERSIST | EV_WRITE;
- else evbits = EV_PERSIST | EV_READ;
- event_set(&c->ev->ev, c->fd, evbits, comm_point_raw_handle_callback,
- c);
- if(event_base_set(base->eb->base, &c->ev->ev) != 0 ||
- event_add(&c->ev->ev, c->timeout) != 0 )
- {
+ evbits = UB_EV_PERSIST | UB_EV_WRITE;
+ else evbits = UB_EV_PERSIST | UB_EV_READ;
+ c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
+ comm_point_raw_handle_callback, c);
+ if(c->ev->ev == NULL) {
+ log_err("could not baseset rawhdl event");
+ free(c->ev);
+ free(c);
+ return NULL;
+ }
+ if (ub_event_add(c->ev->ev, c->timeout) != 0) {
log_err("could not add rawhdl event");
+ ub_event_free(c->ev->ev);
free(c->ev);
free(c);
return NULL;
if(!c)
return;
if(c->fd != -1)
- if(event_del(&c->ev->ev) != 0) {
+ if(ub_event_del(c->ev->ev) != 0) {
log_err("could not event_del on close");
}
/* close fd after removing from event lists, or epoll.. is messed up */
free(c->timeout);
if(c->type == comm_tcp || c->type == comm_local)
sldns_buffer_free(c->buffer);
+ ub_event_free(c->ev->ev);
free(c->ev);
free(c);
}
comm_point_stop_listening(struct comm_point* c)
{
verbose(VERB_ALGO, "comm point stop listening %d", c->fd);
- if(event_del(&c->ev->ev) != 0) {
+ if(ub_event_del(c->ev->ev) != 0) {
log_err("event_del error to stoplisten");
}
}
return;
}
}
- c->ev->ev.ev_events |= EV_TIMEOUT;
+ ub_event_add_bits(c->ev->ev, UB_EV_TIMEOUT);
#ifndef S_SPLINT_S /* splint fails on struct timeval. */
c->timeout->tv_sec = sec;
c->timeout->tv_usec = 0;
#endif /* S_SPLINT_S */
}
if(c->type == comm_tcp) {
- c->ev->ev.ev_events &= ~(EV_READ|EV_WRITE);
+ ub_event_del_bits(c->ev->ev, UB_EV_READ|UB_EV_WRITE);
if(c->tcp_is_reading)
- c->ev->ev.ev_events |= EV_READ;
- else c->ev->ev.ev_events |= EV_WRITE;
+ ub_event_add_bits(c->ev->ev, UB_EV_READ);
+ else ub_event_add_bits(c->ev->ev, UB_EV_WRITE);
}
if(newfd != -1) {
if(c->fd != -1) {
#endif
}
c->fd = newfd;
- c->ev->ev.ev_fd = c->fd;
+ ub_event_set_fd(c->ev->ev, c->fd);
}
- if(event_add(&c->ev->ev, sec==0?NULL:c->timeout) != 0) {
+ if(ub_event_add(c->ev->ev, sec==0?NULL:c->timeout) != 0) {
log_err("event_add failed. in cpsl.");
}
}
void comm_point_listen_for_rw(struct comm_point* c, int rd, int wr)
{
verbose(VERB_ALGO, "comm point listen_for_rw %d %d", c->fd, wr);
- if(event_del(&c->ev->ev) != 0) {
+ if(ub_event_del(c->ev->ev) != 0) {
log_err("event_del error to cplf");
}
- c->ev->ev.ev_events &= ~(EV_READ|EV_WRITE);
- if(rd) c->ev->ev.ev_events |= EV_READ;
- if(wr) c->ev->ev.ev_events |= EV_WRITE;
- if(event_add(&c->ev->ev, c->timeout) != 0) {
+ ub_event_del_bits(c->ev->ev, UB_EV_READ|UB_EV_WRITE);
+ if(rd) ub_event_add_bits(c->ev->ev, UB_EV_READ);
+ if(wr) ub_event_add_bits(c->ev->ev, UB_EV_WRITE);
+ if(ub_event_add(c->ev->ev, c->timeout) != 0) {
log_err("event_add failed. in cplf.");
}
}
struct comm_timer*
comm_timer_create(struct comm_base* base, void (*cb)(void*), void* cb_arg)
{
- struct comm_timer *tm = (struct comm_timer*)calloc(1,
- sizeof(struct comm_timer));
- if(!tm)
- return NULL;
- tm->ev_timer = (struct internal_timer*)calloc(1,
+ struct internal_timer *tm = (struct internal_timer*)calloc(1,
sizeof(struct internal_timer));
- if(!tm->ev_timer) {
+ if(!tm) {
log_err("malloc failed");
- free(tm);
return NULL;
}
- tm->ev_timer->base = base;
- tm->callback = cb;
- tm->cb_arg = cb_arg;
- event_set(&tm->ev_timer->ev, -1, EV_TIMEOUT,
- comm_timer_callback, tm);
- if(event_base_set(base->eb->base, &tm->ev_timer->ev) != 0) {
+ tm->super.ev_timer = tm;
+ tm->base = base;
+ tm->super.callback = cb;
+ tm->super.cb_arg = cb_arg;
+ tm->ev = ub_event_new(base->eb->base, -1, UB_EV_TIMEOUT,
+ comm_timer_callback, &tm->super);
+ if(tm->ev == NULL) {
log_err("timer_create: event_base_set failed.");
- free(tm->ev_timer);
free(tm);
return NULL;
}
- return tm;
+ return &tm->super;
}
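
Side note for illustration, not part of the patch: the rewrite above relies on the derived-struct idiom. Because the comm_timer super struct is the first member of struct internal_timer, the returned &tm->super and tm address the same storage, which is what lets comm_timer_delete() free timer->ev_timer directly. A minimal sketch of that invariant (assuming it sits next to the definitions in netevent.c):

    #include <assert.h>

    /** sketch: the layout invariant the timer code depends on */
    static void
    example_check_timer_layout(struct internal_timer* tm)
    {
            /* super is the first member, so the two pointers are interchangeable */
            assert((void*)tm == (void*)&tm->super);
            /* comm_timer_create() points super.ev_timer back at tm */
            assert(tm->super.ev_timer == tm);
    }
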
void
{
if(!timer)
return;
- evtimer_del(&timer->ev_timer->ev);
+ ub_timer_del(timer->ev_timer->ev);
timer->ev_timer->enabled = 0;
}
log_assert(tv);
if(timer->ev_timer->enabled)
comm_timer_disable(timer);
- event_set(&timer->ev_timer->ev, -1, EV_TIMEOUT,
- comm_timer_callback, timer);
- if(event_base_set(timer->ev_timer->base->eb->base,
- &timer->ev_timer->ev) != 0)
- log_err("comm_timer_set: set_base failed.");
- if(evtimer_add(&timer->ev_timer->ev, tv) != 0)
+ if(ub_timer_add(timer->ev_timer->ev, timer->ev_timer->base->eb->base,
+ comm_timer_callback, timer, tv) != 0)
log_err("comm_timer_set: evtimer_add failed.");
timer->ev_timer->enabled = 1;
}
if(!timer)
return;
comm_timer_disable(timer);
+	/* Free the sub struct timer->ev_timer that is derived from the super
+	 * struct timer, i.e. assert((void*)timer == (void*)timer->ev_timer)
+	 */
+ ub_event_free(timer->ev_timer->ev);
free(timer->ev_timer);
- free(timer);
}
void
comm_timer_callback(int ATTR_UNUSED(fd), short event, void* arg)
{
struct comm_timer* tm = (struct comm_timer*)arg;
- if(!(event&EV_TIMEOUT))
+ if(!(event&UB_EV_TIMEOUT))
return;
- comm_base_now(tm->ev_timer->base);
+ ub_comm_base_now(tm->ev_timer->base);
tm->ev_timer->enabled = 0;
fptr_ok(fptr_whitelist_comm_timer(tm->callback));
(*tm->callback)(tm->cb_arg);
}
size_t
-comm_timer_get_mem(struct comm_timer* timer)
+comm_timer_get_mem(struct comm_timer* ATTR_UNUSED(timer))
{
- return sizeof(*timer) + sizeof(struct internal_timer);
+ return sizeof(struct internal_timer);
}
struct comm_signal*
comm_signal_callback(int sig, short event, void* arg)
{
struct comm_signal* comsig = (struct comm_signal*)arg;
- if(!(event & EV_SIGNAL))
+ if(!(event & UB_EV_SIGNAL))
return;
- comm_base_now(comsig->base);
+ ub_comm_base_now(comsig->base);
fptr_ok(fptr_whitelist_comm_signal(comsig->callback));
(*comsig->callback)(sig, comsig->cb_arg);
}
}
log_assert(comsig);
/* add signal event */
- signal_set(&entry->ev, sig, comm_signal_callback, comsig);
- if(event_base_set(comsig->base->eb->base, &entry->ev) != 0) {
- log_err("Could not set signal base");
+ entry->ev = ub_signal_new(comsig->base->eb->base, sig,
+ comm_signal_callback, comsig);
+ if(entry->ev == NULL) {
+ log_err("Could not create signal event");
free(entry);
return 0;
}
- if(signal_add(&entry->ev, NULL) != 0) {
+ if(ub_signal_add(entry->ev, NULL) != 0) {
log_err("Could not add signal handler");
+ ub_event_free(entry->ev);
free(entry);
return 0;
}
p=comsig->ev_signal;
while(p) {
np = p->next;
- signal_del(&p->ev);
+ ub_signal_del(p->ev);
+ ub_event_free(p->ev);
free(p);
p = np;
}
struct sldns_buffer;
struct comm_point;
struct comm_reply;
-struct event_base;
+struct ub_event_base;
/* internal event notification data storage structure. */
struct internal_event;
struct internal_base;
-struct internal_timer;
+struct internal_timer; /* A sub struct of the comm_timer super struct */
/** callback from communication point function type */
typedef int comm_point_callback_t(struct comm_point*, void*, int,
* Structure only for making timeout events.
*/
struct comm_timer {
- /** the internal event stuff */
+ /** the internal event stuff (derived) */
struct internal_timer* ev_timer;
/** callback function, takes user arg only */
struct comm_base* comm_base_create(int sigs);
/**
- * Create comm base that uses the given event_base (underlying event
- * mechanism pointer).
- * @param base: underlying lib event base.
+ * Create comm base that uses the given ub_event_base (underlying pluggable
+ * event mechanism pointer).
+ * @param base: underlying pluggable event base.
* @return: the new comm base. NULL on error.
*/
-struct comm_base* comm_base_create_event(struct event_base* base);
+struct comm_base* comm_base_create_event(struct ub_event_base* base);
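
For illustration, not part of the patch: a libunbound-style caller that already owns a libevent event_base can hand it to the comm layer by first wrapping it with ub_libevent_event_base() from util/ub_event.h; a minimal sketch under that assumption:

    #include "util/ub_event.h"      /* ub_libevent_event_base() */
    #include "util/netevent.h"      /* comm_base_create_event() */

    /** sketch: build a comm_base on top of an application-owned libevent base */
    static struct comm_base*
    example_comm_base_from_libevent(struct event_base* app_base)
    {
            struct ub_event_base* ub = ub_libevent_event_base(app_base);
            if(!ub)
                    return NULL;    /* e.g. built with the internal mini-event */
            return comm_base_create_event(ub);
    }
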
/**
* Delete comm base structure but not the underlying lib event base.
/**
* Access internal data structure (for util/tube.c on windows)
* @param b: comm base
- * @return event_base. Could be libevent, or internal event handler.
+ * @return ub_event_base.
*/
-struct event_base* comm_base_internal(struct comm_base* b);
+struct ub_event_base* comm_base_internal(struct comm_base* b);
/**
* Create an UDP comm point. Calls malloc.
#include "util/net_help.h"
#include "util/netevent.h"
#include "util/fptr_wlist.h"
+#include "util/ub_event.h"
#ifndef USE_WINSOCK
/* on unix */
d = r;
while(d != (ssize_t)sizeof(len)) {
if((r=write(fd, ((char*)&len)+d, sizeof(len)-d)) == -1) {
+ if(errno == EAGAIN)
+ continue; /* temporarily unavail: try again*/
log_err("tube msg write failed: %s", strerror(errno));
(void)fd_set_nonblock(fd);
return 0;
d = 0;
while(d != (ssize_t)len) {
if((r=write(fd, buf+d, len-d)) == -1) {
+ if(errno == EAGAIN)
+ continue; /* temporarily unavail: try again*/
log_err("tube msg write failed: %s", strerror(errno));
(void)fd_set_nonblock(fd);
return 0;
void tube_remove_bg_listen(struct tube* tube)
{
verbose(VERB_ALGO, "tube remove_bg_listen");
- winsock_unregister_wsaevent(&tube->ev_listen);
+ ub_winsock_unregister_wsaevent(tube->ev_listen);
}
void tube_remove_bg_write(struct tube* tube)
tube->listen_arg = arg;
if(!comm_base_internal(base))
return 1; /* ignore when no comm base - testing */
- return winsock_register_wsaevent(comm_base_internal(base),
- &tube->ev_listen, tube->event, &tube_handle_signal, tube);
+ tube->ev_listen = ub_winsock_register_wsaevent(
+ comm_base_internal(base), tube->event, &tube_handle_signal, tube);
+ return tube->ev_listen ? 1 : 0;
}
int tube_setup_bg_write(struct tube* ATTR_UNUSED(tube),
struct tube_res_list;
#ifdef USE_WINSOCK
#include "util/locks.h"
-#include "util/winsock_event.h"
#endif
/**
/** the windows sockets event (signaled if items in pipe) */
WSAEVENT event;
/** winsock event storage when registered with event base */
- struct event ev_listen;
+ struct ub_event* ev_listen;
/** lock on the list of outstanding items */
lock_basic_t res_lock;
--- /dev/null
+/*
+ * util/ub_event.c - directly call libevent (compatibility) functions
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains an implementation of the indirection layer for pluggable
+ * events that either passes calls directly to libevent, or calls the libevent
+ * compatibility layer functions.
+ */
+#include "config.h"
+#include <sys/time.h>
+#include "util/ub_event.h"
+#include "util/log.h"
+#include "util/netevent.h"
+#include "util/tube.h"
+
+/* We define libevent structures here to hide the libevent stuff. */
+
+#ifdef USE_MINI_EVENT
+# ifdef USE_WINSOCK
+# include "util/winsock_event.h"
+# else
+# include "util/mini_event.h"
+# endif /* USE_WINSOCK */
+#else /* USE_MINI_EVENT */
+ /* we use libevent */
+# ifdef HAVE_EVENT_H
+# include <event.h>
+# else
+# include "event2/event.h"
+# include "event2/event_struct.h"
+# include "event2/event_compat.h"
+# endif
+#endif /* USE_MINI_EVENT */
+
+#if UB_EV_TIMEOUT != EV_TIMEOUT || UB_EV_READ != EV_READ || \
+ UB_EV_WRITE != EV_WRITE || UB_EV_SIGNAL != EV_SIGNAL || \
+ UB_EV_PERSIST != EV_PERSIST
+/* Only necessary for libev */
+# define NATIVE_BITS(b) ( \
+ (((b) & UB_EV_TIMEOUT) ? EV_TIMEOUT : 0) \
+ | (((b) & UB_EV_READ ) ? EV_READ : 0) \
+ | (((b) & UB_EV_WRITE ) ? EV_WRITE : 0) \
+ | (((b) & UB_EV_SIGNAL ) ? EV_SIGNAL : 0) \
+ | (((b) & UB_EV_PERSIST) ? EV_PERSIST : 0))
+
+# define UB_EV_BITS(b) ( \
+ (((b) & EV_TIMEOUT) ? UB_EV_TIMEOUT : 0) \
+ | (((b) & EV_READ ) ? UB_EV_READ : 0) \
+ | (((b) & EV_WRITE ) ? UB_EV_WRITE : 0) \
+ | (((b) & EV_SIGNAL ) ? UB_EV_SIGNAL : 0) \
+ | (((b) & EV_PERSIST) ? UB_EV_PERSIST : 0))
+
+# define UB_EV_BITS_CB(C) void my_ ## C (int fd, short bits, void *arg) \
+ { (C)(fd, UB_EV_BITS(bits), arg); }
+
+UB_EV_BITS_CB(comm_point_udp_callback)
+UB_EV_BITS_CB(comm_point_udp_ancil_callback)
+UB_EV_BITS_CB(comm_point_tcp_accept_callback)
+UB_EV_BITS_CB(comm_point_tcp_handle_callback)
+UB_EV_BITS_CB(comm_timer_callback)
+UB_EV_BITS_CB(comm_signal_callback)
+UB_EV_BITS_CB(comm_point_local_handle_callback)
+UB_EV_BITS_CB(comm_point_raw_handle_callback)
+UB_EV_BITS_CB(tube_handle_signal)
+UB_EV_BITS_CB(comm_base_handle_slow_accept)
+
+static void (*NATIVE_BITS_CB(void (*cb)(int, short, void*)))(int, short, void*)
+{
+ if(cb == comm_point_udp_callback)
+ return my_comm_point_udp_callback;
+ else if(cb == comm_point_udp_ancil_callback)
+ return my_comm_point_udp_ancil_callback;
+ else if(cb == comm_point_tcp_accept_callback)
+ return my_comm_point_tcp_accept_callback;
+ else if(cb == comm_point_tcp_handle_callback)
+ return my_comm_point_tcp_handle_callback;
+ else if(cb == comm_timer_callback)
+ return my_comm_timer_callback;
+ else if(cb == comm_signal_callback)
+ return my_comm_signal_callback;
+ else if(cb == comm_point_local_handle_callback)
+ return my_comm_point_local_handle_callback;
+ else if(cb == comm_point_raw_handle_callback)
+ return my_comm_point_raw_handle_callback;
+ else if(cb == tube_handle_signal)
+ return my_tube_handle_signal;
+ else if(cb == comm_base_handle_slow_accept)
+ return my_comm_base_handle_slow_accept;
+ else
+ return NULL;
+}
+#else
+# define NATIVE_BITS(b) (b)
+# define NATIVE_BITS_CB(c) (c)
+#endif
+
+#ifndef EVFLAG_AUTO
+#define EVFLAG_AUTO 0
+#endif
+
+#define AS_EVENT_BASE(x) \
+ (((union {struct ub_event_base* a; struct event_base* b;})x).b)
+#define AS_UB_EVENT_BASE(x) \
+ (((union {struct event_base* a; struct ub_event_base* b;})x).b)
+#define AS_EVENT(x) \
+ (((union {struct ub_event* a; struct event* b;})x).b)
+#define AS_UB_EVENT(x) \
+ (((union {struct event* a; struct ub_event* b;})x).b)
+
+const char* ub_event_get_version()
+{
+ return event_get_version();
+}
+
+#if (defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)) && defined(EVBACKEND_SELECT)
+static const char* ub_ev_backend2str(int b)
+{
+ switch(b) {
+ case EVBACKEND_SELECT: return "select";
+ case EVBACKEND_POLL: return "poll";
+ case EVBACKEND_EPOLL: return "epoll";
+ case EVBACKEND_KQUEUE: return "kqueue";
+ case EVBACKEND_DEVPOLL: return "devpoll";
+ case EVBACKEND_PORT: return "evport";
+ }
+ return "unknown";
+}
+#endif
+
+void
+ub_get_event_sys(struct ub_event_base* base, const char** n, const char** s,
+ const char** m)
+{
+#ifdef USE_WINSOCK
+ (void)base;
+ *n = "event";
+ *s = "winsock";
+ *m = "WSAWaitForMultipleEvents";
+#elif defined(USE_MINI_EVENT)
+ (void)base;
+ *n = "mini-event";
+ *s = "internal";
+ *m = "select";
+#else
+ struct event_base* b = AS_EVENT_BASE(base);
+ *s = event_get_version();
+# if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
+ *n = "libev";
+ if (!b)
+ b = (struct event_base*)ev_default_loop(EVFLAG_AUTO);
+# ifdef EVBACKEND_SELECT
+ *m = ub_ev_backend2str(ev_backend((struct ev_loop*)b));
+# else
+ *m = "not obtainable";
+# endif
+# elif defined(HAVE_EVENT_BASE_GET_METHOD)
+ *n = "libevent";
+ if (!b)
+ b = event_base_new();
+ *m = event_base_get_method(b);
+# else
+ *n = "unknown";
+ *m = "not obtainable";
+ (void)b;
+# endif
+# ifdef HAVE_EVENT_BASE_FREE
+ if (b && b != AS_EVENT_BASE(base))
+ event_base_free(b);
+# endif
+#endif
+}
+
+struct ub_event_base*
+ub_default_event_base(int sigs, time_t* time_secs, struct timeval* time_tv)
+{
+ void* base;
+
+ (void)base;
+#ifdef USE_MINI_EVENT
+ (void)sigs;
+ /* use mini event time-sharing feature */
+ base = event_init(time_secs, time_tv);
+#else
+ (void)time_secs;
+ (void)time_tv;
+# if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
+ /* libev */
+ if(sigs)
+ base = ev_default_loop(EVFLAG_AUTO);
+ else
+ base = ev_loop_new(EVFLAG_AUTO);
+# else
+ (void)sigs;
+# ifdef HAVE_EVENT_BASE_NEW
+ base = event_base_new();
+# else
+ base = event_init();
+# endif
+# endif
+#endif
+ return (struct ub_event_base*)base;
+}
+
+struct ub_event_base *
+ub_libevent_event_base(struct event_base* libevent_base)
+{
+#ifdef USE_MINI_EVENT
+ (void)libevent_base;
+ return NULL;
+#else
+ return AS_UB_EVENT_BASE(libevent_base);
+#endif
+}
+
+struct event_base *
+ub_libevent_get_event_base(struct ub_event_base* base)
+{
+#ifdef USE_MINI_EVENT
+ (void)base;
+ return NULL;
+#else
+ return AS_EVENT_BASE(base);
+#endif
+}
+
+void
+ub_event_base_free(struct ub_event_base* base)
+{
+#ifdef USE_MINI_EVENT
+ event_base_free(AS_EVENT_BASE(base));
+#elif defined(HAVE_EVENT_BASE_FREE) && defined(HAVE_EVENT_BASE_ONCE)
+ /* only libevent 1.2+ has it, but in 1.2 it is broken -
+ assertion fails on signal handling ev that is not deleted
+ in libevent 1.3c (event_base_once appears) this is fixed. */
+ event_base_free(AS_EVENT_BASE(base));
+#else
+ (void)base;
+#endif /* HAVE_EVENT_BASE_FREE and HAVE_EVENT_BASE_ONCE */
+}
+
+int
+ub_event_base_dispatch(struct ub_event_base* base)
+{
+ return event_base_dispatch(AS_EVENT_BASE(base));
+}
+
+int
+ub_event_base_loopexit(struct ub_event_base* base)
+{
+ return event_base_loopexit(AS_EVENT_BASE(base), NULL);
+}
+
+struct ub_event*
+ub_event_new(struct ub_event_base* base, int fd, short bits,
+ void (*cb)(int, short, void*), void* arg)
+{
+ struct event *ev = (struct event*)calloc(1, sizeof(struct event));
+
+ if (!ev)
+ return NULL;
+
+ event_set(ev, fd, NATIVE_BITS(bits), NATIVE_BITS_CB(cb), arg);
+ if (event_base_set(AS_EVENT_BASE(base), ev) != 0) {
+ free(ev);
+ return NULL;
+ }
+ return AS_UB_EVENT(ev);
+}
+
+struct ub_event*
+ub_signal_new(struct ub_event_base* base, int fd,
+ void (*cb)(int, short, void*), void* arg)
+{
+ struct event *ev = (struct event*)calloc(1, sizeof(struct event));
+
+ if (!ev)
+ return NULL;
+
+ signal_set(ev, fd, NATIVE_BITS_CB(cb), arg);
+ if (event_base_set(AS_EVENT_BASE(base), ev) != 0) {
+ free(ev);
+ return NULL;
+ }
+ return AS_UB_EVENT(ev);
+}
+
+struct ub_event*
+ub_winsock_register_wsaevent(struct ub_event_base* base, void* wsaevent,
+ void (*cb)(int, short, void*), void* arg)
+{
+#if defined(USE_MINI_EVENT) && defined(USE_WINSOCK)
+ struct event *ev = (struct event*)calloc(1, sizeof(struct event));
+
+ if (!ev)
+ return NULL;
+
+ if (winsock_register_wsaevent(AS_EVENT_BASE(base), ev, wsaevent, cb,
+ arg))
+ return AS_UB_EVENT(ev);
+ free(ev);
+ return NULL;
+#else
+ (void)base;
+ (void)wsaevent;
+ (void)cb;
+ (void)arg;
+ return NULL;
+#endif
+}
+
+void
+ub_event_add_bits(struct ub_event* ev, short bits)
+{
+ AS_EVENT(ev)->ev_events |= NATIVE_BITS(bits);
+}
+
+void
+ub_event_del_bits(struct ub_event* ev, short bits)
+{
+ AS_EVENT(ev)->ev_events &= ~NATIVE_BITS(bits);
+}
+
+void
+ub_event_set_fd(struct ub_event* ev, int fd)
+{
+ AS_EVENT(ev)->ev_fd = fd;
+}
+
+void
+ub_event_free(struct ub_event* ev)
+{
+ if (ev)
+ free(AS_EVENT(ev));
+}
+
+int
+ub_event_add(struct ub_event* ev, struct timeval* tv)
+{
+ return event_add(AS_EVENT(ev), tv);
+}
+
+int
+ub_event_del(struct ub_event* ev)
+{
+ return event_del(AS_EVENT(ev));
+}
+
+int
+ub_timer_add(struct ub_event* ev, struct ub_event_base* base,
+ void (*cb)(int, short, void*), void* arg, struct timeval* tv)
+{
+ event_set(AS_EVENT(ev), -1, EV_TIMEOUT, NATIVE_BITS_CB(cb), arg);
+ if (event_base_set(AS_EVENT_BASE(base), AS_EVENT(ev)) != 0)
+ return -1;
+ return evtimer_add(AS_EVENT(ev), tv);
+}
+
+int
+ub_timer_del(struct ub_event* ev)
+{
+ return evtimer_del(AS_EVENT(ev));
+}
+
+int
+ub_signal_add(struct ub_event* ev, struct timeval* tv)
+{
+ return signal_add(AS_EVENT(ev), tv);
+}
+
+int
+ub_signal_del(struct ub_event* ev)
+{
+ return signal_del(AS_EVENT(ev));
+}
+
+void
+ub_winsock_unregister_wsaevent(struct ub_event* ev)
+{
+#if defined(USE_MINI_EVENT) && defined(USE_WINSOCK)
+ winsock_unregister_wsaevent(AS_EVENT(ev));
+ free(AS_EVENT(ev));
+#else
+ (void)ev;
+#endif
+}
+
+void
+ub_winsock_tcp_wouldblock(struct ub_event* ev, int eventbits)
+{
+#if defined(USE_MINI_EVENT) && defined(USE_WINSOCK)
+ winsock_tcp_wouldblock(AS_EVENT(ev), NATIVE_BITS(eventbits));
+#else
+ (void)ev;
+ (void)eventbits;
+#endif
+}
+
+void ub_comm_base_now(struct comm_base* cb)
+{
+ #ifdef USE_MINI_EVENT
+/** minievent updates the time when it blocks. */
+ (void)cb; /* nothing to do */
+#else /* !USE_MINI_EVENT */
+/** fillup the time values in the event base */
+ time_t *tt;
+ struct timeval *tv;
+ comm_base_timept(cb, &tt, &tv);
+ if(gettimeofday(tv, NULL) < 0) {
+ log_err("gettimeofday: %s", strerror(errno));
+ }
+ *tt = tv->tv_sec;
+#endif /* USE_MINI_EVENT */
+}
+
--- /dev/null
+/*
+ * util/ub_event.h - indirection layer for pluggable events
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains prototypes for event loop functions.
+ *
+ */
+
+#ifndef UB_EVENT_H
+#define UB_EVENT_H
+
+struct ub_event_base;
+struct ub_event;
+struct comm_base;
+struct event_base;
+
+/** event timeout */
+#define UB_EV_TIMEOUT 0x01
+/** event fd readable */
+#define UB_EV_READ 0x02
+/** event fd writable */
+#define UB_EV_WRITE 0x04
+/** event signal */
+#define UB_EV_SIGNAL 0x08
+/** event must persist */
+#define UB_EV_PERSIST 0x10
+
+/** Returns event-base type. Could be "mini-event", "winsock-event" for the
+ * daemon compile, and will be "pluggable-event<PACKAGE_VERSION>" for
+ * libunbound.
+ */
+const char* ub_event_get_version();
+/** Return the name, system and method for the pluggable event base */
+void ub_get_event_sys(struct ub_event_base*, const char** n, const char** s,
+ const char** m);
+/** Return a default event base. In the daemon these will be the only event
+ * bases used.
+ */
+struct ub_event_base* ub_default_event_base(int, time_t*, struct timeval*);
+/** Return an ub_event_base constructed for the given libevent event base */
+struct ub_event_base* ub_libevent_event_base(struct event_base*);
+/** Return the libevent base underlying the given ub_event_base. Will return
+ * NULL when the ub_event_base does not have an underlying libevent event base
+ */
+struct event_base* ub_libevent_get_event_base(struct ub_event_base*);
+/** Free event base. Free events yourself */
+void ub_event_base_free(struct ub_event_base*);
+/** Run the event base */
+int ub_event_base_dispatch(struct ub_event_base*);
+/** exit that loop */
+int ub_event_base_loopexit(struct ub_event_base*);
+
+/** Create a new ub_event for the event base */
+struct ub_event* ub_event_new(struct ub_event_base*,
+ int fd, short bits, void (*cb)(int, short, void*), void* arg);
+/** Create a new ub_event signal for the event base */
+struct ub_event* ub_signal_new(struct ub_event_base*, int fd,
+ void (*cb)(int, short, void*), void* arg);
+/** Create a new ub_event associated with the wsaevent for the event base */
+struct ub_event* ub_winsock_register_wsaevent(struct ub_event_base*,
+ void* wsaevent, void (*cb)(int, short, void*), void* arg);
+
+/** Add event bits for this event to fire on */
+void ub_event_add_bits(struct ub_event*, short bits);
+/** Configure the event so it will no longer fire on the given bits */
+void ub_event_del_bits(struct ub_event*, short bits);
+/** Change or set the file descriptor on the event */
+void ub_event_set_fd(struct ub_event*, int fd);
+/** free the event */
+void ub_event_free(struct ub_event*);
+/** Activate the event. The given timeval is a timeout value. */
+int ub_event_add(struct ub_event*, struct timeval*);
+/** Deactivate the event */
+int ub_event_del(struct ub_event*);
+/** Reconfigure and activate a timeout event */
+int ub_timer_add(struct ub_event*, struct ub_event_base*,
+ void (*cb)(int, short, void*), void* arg, struct timeval*);
+/** Deactivate the timeout event */
+int ub_timer_del(struct ub_event*);
+/** Activate a signal event */
+int ub_signal_add(struct ub_event*, struct timeval*);
+/** Deactivate a signal event */
+int ub_signal_del(struct ub_event*);
+/** Free an event that is associated with a wsaevent */
+void ub_winsock_unregister_wsaevent(struct ub_event* ev);
+/** Signal the event loop when a TCP Windows socket will block on the next
+ * read or write (given by the eventbits)
+ */
+void ub_winsock_tcp_wouldblock(struct ub_event*, int bits);
+/** Equip the comm_base with the current time */
+void ub_comm_base_now(struct comm_base* cb);
+
+#endif /* UB_EVENT_H */
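
For illustration, not part of the patch: the simplest use of this API is to create the default base and ask which backend it selected, mirroring the verbose log line in comm_base_create(). No callbacks are registered here, since the daemon only wires up its own whitelisted internal callbacks. A minimal sketch:

    #include <stdio.h>
    #include <time.h>
    #include <sys/time.h>
    #include "util/ub_event.h"

    /** sketch: create the default pluggable event base and report its backend */
    static void
    example_print_event_backend(void)
    {
            time_t secs;
            struct timeval now;
            const char *name = "event", *sys = "", *method = "";
            struct ub_event_base* base = ub_default_event_base(1, &secs, &now);
            if(!base)
                    return;
            ub_get_event_sys(base, &name, &sys, &method);
            printf("%s %s uses %s method\n", name, sys, method);
            ub_event_base_free(base);       /* any events must be freed by the caller */
    }
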
--- /dev/null
+/*
+ * util/ub_event_pluggable.c - call registered pluggable event functions
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains an implementation for the indirection layer for pluggable
+ * events that calls the registered pluggable event loop. It also defines a
+ * default pluggable event loop based on the default libevent (compatibility)
+ * functions.
+ */
+#include "config.h"
+#include <sys/time.h>
+#include "util/ub_event.h"
+#include "libunbound/unbound-event.h"
+#include "util/netevent.h"
+#include "util/log.h"
+#include "util/fptr_wlist.h"
+
+/* We define libevent structures here to hide the libevent stuff. */
+
+#ifdef USE_MINI_EVENT
+# ifdef USE_WINSOCK
+# include "util/winsock_event.h"
+# else
+# include "util/mini_event.h"
+# endif /* USE_WINSOCK */
+#else /* USE_MINI_EVENT */
+ /* we use libevent */
+# ifdef HAVE_EVENT_H
+# include <event.h>
+# else
+# include "event2/event.h"
+# include "event2/event_struct.h"
+# include "event2/event_compat.h"
+# endif
+#endif /* USE_MINI_EVENT */
+
+#if UB_EV_TIMEOUT != EV_TIMEOUT || UB_EV_READ != EV_READ || \
+ UB_EV_WRITE != EV_WRITE || UB_EV_SIGNAL != EV_SIGNAL || \
+ UB_EV_PERSIST != EV_PERSIST
+/* Only necessary for libev */
+# define NATIVE_BITS(b) ( \
+ (((b) & UB_EV_TIMEOUT) ? EV_TIMEOUT : 0) \
+ | (((b) & UB_EV_READ ) ? EV_READ : 0) \
+ | (((b) & UB_EV_WRITE ) ? EV_WRITE : 0) \
+ | (((b) & UB_EV_SIGNAL ) ? EV_SIGNAL : 0) \
+ | (((b) & UB_EV_PERSIST) ? EV_PERSIST : 0))
+
+# define UB_EV_BITS(b) ( \
+ (((b) & EV_TIMEOUT) ? UB_EV_TIMEOUT : 0) \
+ | (((b) & EV_READ ) ? UB_EV_READ : 0) \
+ | (((b) & EV_WRITE ) ? UB_EV_WRITE : 0) \
+ | (((b) & EV_SIGNAL ) ? UB_EV_SIGNAL : 0) \
+ | (((b) & EV_PERSIST) ? UB_EV_PERSIST : 0))
+
+# define UB_EV_BITS_CB(C) void my_ ## C (int fd, short bits, void *arg) \
+ { (C)(fd, UB_EV_BITS(bits), arg); }
+
+UB_EV_BITS_CB(comm_point_udp_callback)
+UB_EV_BITS_CB(comm_point_udp_ancil_callback)
+UB_EV_BITS_CB(comm_point_tcp_accept_callback)
+UB_EV_BITS_CB(comm_point_tcp_handle_callback)
+UB_EV_BITS_CB(comm_timer_callback)
+UB_EV_BITS_CB(comm_signal_callback)
+UB_EV_BITS_CB(comm_point_local_handle_callback)
+UB_EV_BITS_CB(comm_point_raw_handle_callback)
+UB_EV_BITS_CB(tube_handle_signal)
+UB_EV_BITS_CB(comm_base_handle_slow_accept)
+
+static void (*NATIVE_BITS_CB(void (*cb)(int, short, void*)))(int, short, void*)
+{
+ if(cb == comm_point_udp_callback)
+ return my_comm_point_udp_callback;
+ else if(cb == comm_point_udp_ancil_callback)
+ return my_comm_point_udp_ancil_callback;
+ else if(cb == comm_point_tcp_accept_callback)
+ return my_comm_point_tcp_accept_callback;
+ else if(cb == comm_point_tcp_handle_callback)
+ return my_comm_point_tcp_handle_callback;
+ else if(cb == comm_timer_callback)
+ return my_comm_timer_callback;
+ else if(cb == comm_signal_callback)
+ return my_comm_signal_callback;
+ else if(cb == comm_point_local_handle_callback)
+ return my_comm_point_local_handle_callback;
+ else if(cb == comm_point_raw_handle_callback)
+ return my_comm_point_raw_handle_callback;
+ else if(cb == tube_handle_signal)
+ return my_tube_handle_signal;
+ else if(cb == comm_base_handle_slow_accept)
+ return my_comm_base_handle_slow_accept;
+ else
+ return NULL;
+}
+#else
+# define NATIVE_BITS(b) (b)
+# define NATIVE_BITS_CB(c) (c)
+#endif
+
+#ifndef EVFLAG_AUTO
+#define EVFLAG_AUTO 0
+#endif
+
+struct my_event_base {
+ struct ub_event_base super;
+ struct event_base* base;
+};
+
+struct my_event {
+ struct ub_event super;
+ struct event ev;
+};
+
+#define AS_MY_EVENT_BASE(x) \
+ (((union {struct ub_event_base* a; struct my_event_base* b;})x).b)
+#define AS_MY_EVENT(x) \
+ (((union {struct ub_event* a; struct my_event* b;})x).b)
+
+const char* ub_event_get_version()
+{
+ return "pluggable-event"PACKAGE_VERSION;
+}
+
+static void
+my_event_add_bits(struct ub_event* ev, short bits)
+{
+ AS_MY_EVENT(ev)->ev.ev_events |= NATIVE_BITS(bits);
+}
+
+static void
+my_event_del_bits(struct ub_event* ev, short bits)
+{
+ AS_MY_EVENT(ev)->ev.ev_events &= ~NATIVE_BITS(bits);
+}
+
+static void
+my_event_set_fd(struct ub_event* ev, int fd)
+{
+ AS_MY_EVENT(ev)->ev.ev_fd = fd;
+}
+
+static void
+my_event_free(struct ub_event* ev)
+{
+ free(AS_MY_EVENT(ev));
+}
+
+static int
+my_event_add(struct ub_event* ev, struct timeval* tv)
+{
+ return event_add(&AS_MY_EVENT(ev)->ev, tv);
+}
+
+static int
+my_event_del(struct ub_event* ev)
+{
+ return event_del(&AS_MY_EVENT(ev)->ev);
+}
+
+static int
+my_timer_add(struct ub_event* ev, struct ub_event_base* base,
+ void (*cb)(int, short, void*), void* arg, struct timeval* tv)
+{
+ event_set(&AS_MY_EVENT(ev)->ev, -1, EV_TIMEOUT,NATIVE_BITS_CB(cb),arg);
+ if (event_base_set(AS_MY_EVENT_BASE(base)->base, &AS_MY_EVENT(ev)->ev)
+ != 0)
+ return -1;
+ return evtimer_add(&AS_MY_EVENT(ev)->ev, tv);
+}
+
+static int
+my_timer_del(struct ub_event* ev)
+{
+ return evtimer_del(&AS_MY_EVENT(ev)->ev);
+}
+
+static int
+my_signal_add(struct ub_event* ev, struct timeval* tv)
+{
+ return signal_add(&AS_MY_EVENT(ev)->ev, tv);
+}
+
+static int
+my_signal_del(struct ub_event* ev)
+{
+ return signal_del(&AS_MY_EVENT(ev)->ev);
+}
+
+static void
+my_winsock_unregister_wsaevent(struct ub_event* ev)
+{
+#if defined(USE_MINI_EVENT) && defined(USE_WINSOCK)
+ winsock_unregister_wsaevent(&AS_MY_EVENT(ev)->ev);
+ free(AS_MY_EVENT(ev));
+#else
+ (void)ev;
+#endif
+}
+
+static void
+my_winsock_tcp_wouldblock(struct ub_event* ev, int eventbits)
+{
+#if defined(USE_MINI_EVENT) && defined(USE_WINSOCK)
+ winsock_tcp_wouldblock(&AS_MY_EVENT(ev)->ev, NATIVE_BITS(eventbits));
+#else
+ (void)ev;
+ (void)eventbits;
+#endif
+}
+
+static struct ub_event_vmt default_event_vmt = {
+ my_event_add_bits, my_event_del_bits, my_event_set_fd,
+ my_event_free, my_event_add, my_event_del,
+ my_timer_add, my_timer_del, my_signal_add, my_signal_del,
+ my_winsock_unregister_wsaevent, my_winsock_tcp_wouldblock
+};
+
+static void
+my_event_base_free(struct ub_event_base* base)
+{
+#ifdef USE_MINI_EVENT
+ event_base_free(AS_MY_EVENT_BASE(base)->base);
+#elif defined(HAVE_EVENT_BASE_FREE) && defined(HAVE_EVENT_BASE_ONCE)
+ /* only libevent 1.2+ has it, but in 1.2 it is broken -
+ assertion fails on signal handling ev that is not deleted
+ in libevent 1.3c (event_base_once appears) this is fixed. */
+ event_base_free(AS_MY_EVENT_BASE(base)->base);
+#endif /* HAVE_EVENT_BASE_FREE and HAVE_EVENT_BASE_ONCE */
+ free(AS_MY_EVENT_BASE(base));
+}
+
+static int
+my_event_base_dispatch(struct ub_event_base* base)
+{
+ return event_base_dispatch(AS_MY_EVENT_BASE(base)->base);
+}
+
+static int
+my_event_base_loopexit(struct ub_event_base* base, struct timeval* tv)
+{
+ return event_base_loopexit(AS_MY_EVENT_BASE(base)->base, tv);
+}
+
+static struct ub_event*
+my_event_new(struct ub_event_base* base, int fd, short bits,
+ void (*cb)(int, short, void*), void* arg)
+{
+ struct my_event *my_ev = (struct my_event*)calloc(1,
+ sizeof(struct my_event));
+
+ if (!my_ev)
+ return NULL;
+
+ event_set(&my_ev->ev, fd, NATIVE_BITS(bits), NATIVE_BITS_CB(cb), arg);
+ if (event_base_set(AS_MY_EVENT_BASE(base)->base, &my_ev->ev) != 0) {
+ free(my_ev);
+ return NULL;
+ }
+ my_ev->super.magic = UB_EVENT_MAGIC;
+ my_ev->super.vmt = &default_event_vmt;
+ return &my_ev->super;
+}
+
+static struct ub_event*
+my_signal_new(struct ub_event_base* base, int fd,
+ void (*cb)(int, short, void*), void* arg)
+{
+ struct my_event *my_ev = (struct my_event*)calloc(1,
+ sizeof(struct my_event));
+
+ if (!my_ev)
+ return NULL;
+
+ signal_set(&my_ev->ev, fd, NATIVE_BITS_CB(cb), arg);
+ if (event_base_set(AS_MY_EVENT_BASE(base)->base, &my_ev->ev) != 0) {
+ free(my_ev);
+ return NULL;
+ }
+ my_ev->super.magic = UB_EVENT_MAGIC;
+ my_ev->super.vmt = &default_event_vmt;
+ return &my_ev->super;
+}
+
+static struct ub_event*
+my_winsock_register_wsaevent(struct ub_event_base* base, void* wsaevent,
+ void (*cb)(int, short, void*), void* arg)
+{
+#if defined(USE_MINI_EVENT) && defined(USE_WINSOCK)
+ struct my_event *my_ev = (struct my_event*)calloc(1,
+ sizeof(struct my_event));
+
+ if (!my_ev)
+ return NULL;
+
+ if (!winsock_register_wsaevent(AS_MY_EVENT_BASE(base)->base,
+ &my_ev->ev, wsaevent, cb, arg)) {
+ free(my_ev);
+ return NULL;
+
+ }
+ my_ev->super.magic = UB_EVENT_MAGIC;
+ my_ev->super.vmt = &default_event_vmt;
+ return &my_ev->super;
+#else
+ (void)base;
+ (void)wsaevent;
+ (void)cb;
+ (void)arg;
+ return NULL;
+#endif
+}
+
+static struct ub_event_base_vmt default_event_base_vmt = {
+ my_event_base_free, my_event_base_dispatch,
+ my_event_base_loopexit, my_event_new, my_signal_new,
+ my_winsock_register_wsaevent
+};
+
+struct ub_event_base*
+ub_default_event_base(int sigs, time_t* time_secs, struct timeval* time_tv)
+{
+ struct my_event_base* my_base = (struct my_event_base*)calloc(1,
+ sizeof(struct my_event_base));
+
+ if (!my_base)
+ return NULL;
+
+#ifdef USE_MINI_EVENT
+ (void)sigs;
+ /* use mini event time-sharing feature */
+ my_base->base = event_init(time_secs, time_tv);
+#else
+ (void)time_secs;
+ (void)time_tv;
+# if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
+ /* libev */
+ if(sigs)
+ my_base->base = (struct event_base*)ev_default_loop(EVFLAG_AUTO);
+ else
+ my_base->base = (struct event_base*)ev_loop_new(EVFLAG_AUTO);
+# else
+ (void)sigs;
+# ifdef HAVE_EVENT_BASE_NEW
+ my_base->base = event_base_new();
+# else
+ my_base->base = event_init();
+# endif
+# endif
+#endif
+ if (!my_base->base) {
+ free(my_base);
+ return NULL;
+ }
+ my_base->super.magic = UB_EVENT_MAGIC;
+ my_base->super.vmt = &default_event_base_vmt;
+ return &my_base->super;
+}
+
+struct ub_event_base*
+ub_libevent_event_base(struct event_base* base)
+{
+#ifdef USE_MINI_EVENT
+ (void)base;
+ return NULL;
+#else
+ struct my_event_base* my_base = (struct my_event_base*)calloc(1,
+ sizeof(struct my_event_base));
+
+ if (!my_base)
+ return NULL;
+ my_base->super.magic = UB_EVENT_MAGIC;
+ my_base->super.vmt = &default_event_base_vmt;
+ my_base->base = base;
+ return &my_base->super;
+#endif
+}
+
+struct event_base*
+ub_libevent_get_event_base(struct ub_event_base* base)
+{
+#ifndef USE_MINI_EVENT
+ if (base->vmt == &default_event_base_vmt)
+ return AS_MY_EVENT_BASE(base)->base;
+#else
+ (void)base;
+#endif
+ return NULL;
+}
+
+#if (defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)) && defined(EVBACKEND_SELECT)
+static const char* ub_ev_backend2str_pluggable(int b)
+{
+ switch(b) {
+ case EVBACKEND_SELECT: return "select";
+ case EVBACKEND_POLL: return "poll";
+ case EVBACKEND_EPOLL: return "epoll";
+ case EVBACKEND_KQUEUE: return "kqueue";
+ case EVBACKEND_DEVPOLL: return "devpoll";
+ case EVBACKEND_PORT: return "evport";
+ }
+ return "unknown";
+}
+#endif
+
+void
+ub_get_event_sys(struct ub_event_base* ub_base, const char** n, const char** s,
+ const char** m)
+{
+#ifdef USE_WINSOCK
+ (void)ub_base;
+ *n = "pluggable-event";
+ *s = "winsock";
+ *m = "WSAWaitForMultipleEvents";
+#elif defined(USE_MINI_EVENT)
+ (void)ub_base;
+ *n = "pluggable-event";
+ *s = "internal";
+ *m = "select";
+#else
+ struct event_base* b = ub_libevent_get_event_base(ub_base);
+ /* This function is only called from comm_base_create, so
+ * ub_base is guaranteed to exist and to be the default
+ * event base.
+ */
+ assert(b);
+ *n = "pluggable-event";
+ *s = event_get_version();
+# if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
+ *n = "pluggable-libev";
+# ifdef EVBACKEND_SELECT
+ *m = ub_ev_backend2str_pluggable(ev_backend((struct ev_loop*)b));
+# else
+ *m = "not obtainable";
+# endif
+# elif defined(HAVE_EVENT_BASE_GET_METHOD)
+ *n = "pluggable-libevent";
+ *m = event_base_get_method(b);
+# else
+ *m = "not obtainable";
+# endif
+#endif
+}
+
+void
+ub_event_base_free(struct ub_event_base* base)
+{
+ if (base && base->magic == UB_EVENT_MAGIC) {
+ fptr_ok(base->vmt != &default_event_base_vmt ||
+ base->vmt->free == my_event_base_free);
+ (*base->vmt->free)(base);
+ }
+}
+
+int
+ub_event_base_dispatch(struct ub_event_base* base)
+{
+ if (base->magic == UB_EVENT_MAGIC) {
+ fptr_ok(base->vmt != &default_event_base_vmt ||
+ base->vmt->dispatch == my_event_base_dispatch);
+ return (*base->vmt->dispatch)(base);
+ }
+ return -1;
+}
+
+int
+ub_event_base_loopexit(struct ub_event_base* base)
+{
+ if (base->magic == UB_EVENT_MAGIC) {
+ fptr_ok(base->vmt != &default_event_base_vmt ||
+ base->vmt->loopexit == my_event_base_loopexit);
+ return (*base->vmt->loopexit)(base, NULL);
+ }
+ return -1;
+}
+
+struct ub_event*
+ub_event_new(struct ub_event_base* base, int fd, short bits,
+ void (*cb)(int, short, void*), void* arg)
+{
+ if (base->magic == UB_EVENT_MAGIC) {
+ fptr_ok(base->vmt != &default_event_base_vmt ||
+ base->vmt->new_event == my_event_new);
+ return (*base->vmt->new_event)(base, fd, bits, cb, arg);
+ }
+ return NULL;
+}
+
+struct ub_event*
+ub_signal_new(struct ub_event_base* base, int fd,
+ void (*cb)(int, short, void*), void* arg)
+{
+ if (base->magic == UB_EVENT_MAGIC) {
+ fptr_ok(base->vmt != &default_event_base_vmt ||
+ base->vmt->new_signal == my_signal_new);
+ return (*base->vmt->new_signal)(base, fd, cb, arg);
+ }
+ return NULL;
+}
+
+struct ub_event*
+ub_winsock_register_wsaevent(struct ub_event_base* base, void* wsaevent,
+ void (*cb)(int, short, void*), void* arg)
+{
+ if (base->magic == UB_EVENT_MAGIC) {
+ fptr_ok(base->vmt != &default_event_base_vmt ||
+ base->vmt->winsock_register_wsaevent ==
+ my_winsock_register_wsaevent);
+ return (*base->vmt->winsock_register_wsaevent)(base, wsaevent, cb, arg);
+ }
+ return NULL;
+}
+
+void
+ub_event_add_bits(struct ub_event* ev, short bits)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->add_bits == my_event_add_bits);
+ (*ev->vmt->add_bits)(ev, bits);
+ }
+}
+
+void
+ub_event_del_bits(struct ub_event* ev, short bits)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->del_bits == my_event_del_bits);
+ (*ev->vmt->del_bits)(ev, bits);
+ }
+}
+
+void
+ub_event_set_fd(struct ub_event* ev, int fd)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->set_fd == my_event_set_fd);
+ (*ev->vmt->set_fd)(ev, fd);
+ }
+}
+
+void
+ub_event_free(struct ub_event* ev)
+{
+ if (ev && ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->free == my_event_free);
+ (*ev->vmt->free)(ev);
+ }
+}
+
+int
+ub_event_add(struct ub_event* ev, struct timeval* tv)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->add == my_event_add);
+ return (*ev->vmt->add)(ev, tv);
+ }
+ return -1;
+}
+
+int
+ub_event_del(struct ub_event* ev)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->del == my_event_del);
+ return (*ev->vmt->del)(ev);
+ }
+ return -1;
+}
+
+int
+ub_timer_add(struct ub_event* ev, struct ub_event_base* base,
+ void (*cb)(int, short, void*), void* arg, struct timeval* tv)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->add_timer == my_timer_add);
+ return (*ev->vmt->add_timer)(ev, base, cb, arg, tv);
+ }
+ return -1;
+}
+
+int
+ub_timer_del(struct ub_event* ev)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->del_timer == my_timer_del);
+ return (*ev->vmt->del_timer)(ev);
+ }
+ return -1;
+}
+
+int
+ub_signal_add(struct ub_event* ev, struct timeval* tv)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->add_signal == my_signal_add);
+ return (*ev->vmt->add_signal)(ev, tv);
+ }
+ return -1;
+}
+
+int
+ub_signal_del(struct ub_event* ev)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->del_signal == my_signal_del);
+ return (*ev->vmt->del_signal)(ev);
+ }
+ return -1;
+}
+
+void
+ub_winsock_unregister_wsaevent(struct ub_event* ev)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->winsock_unregister_wsaevent ==
+ my_winsock_unregister_wsaevent);
+ (*ev->vmt->winsock_unregister_wsaevent)(ev);
+ }
+}
+
+void
+ub_winsock_tcp_wouldblock(struct ub_event* ev, int eventbits)
+{
+ if (ev->magic == UB_EVENT_MAGIC) {
+ fptr_ok(ev->vmt != &default_event_vmt ||
+ ev->vmt->winsock_tcp_wouldblock ==
+ my_winsock_tcp_wouldblock);
+ (*ev->vmt->winsock_tcp_wouldblock)(ev, eventbits);
+ }
+}
+
+void ub_comm_base_now(struct comm_base* cb)
+{
+ time_t *tt;
+ struct timeval *tv;
+
+#ifdef USE_MINI_EVENT
+/** minievent updates the time when it blocks. */
+ if (comm_base_internal(cb)->magic == UB_EVENT_MAGIC &&
+ comm_base_internal(cb)->vmt == &default_event_base_vmt)
+ return; /* Actually using mini event, so do not set time */
+#endif /* USE_MINI_EVENT */
+
+/** fillup the time values in the event base */
+ comm_base_timept(cb, &tt, &tv);
+ if(gettimeofday(tv, NULL) < 0) {
+ log_err("gettimeofday: %s", strerror(errno));
+ }
+ *tt = tv->tv_sec;
+}
+
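
The new pluggable event code above routes every event operation through a small vmt, so the default mini-event/libevent/libev backends (selected in ub_default_event_base() above) and, by design, other vmt implementations can sit behind the same ub_event_* calls. A minimal usage sketch of the default base follows; it is not part of the patch, the helper names (run_loop, on_readable) are made up, and it assumes the ub_event_* declarations and the UB_EV_READ/UB_EV_PERSIST flag names come from util/ub_event.h, which is not shown in this hunk.

#include <time.h>
#include <sys/time.h>
#include "util/ub_event.h"

/* hypothetical callback; bits would carry UB_EV_READ and friends */
static void on_readable(int fd, short bits, void* arg)
{
	(void)fd; (void)bits; (void)arg;
}

static int run_loop(int sock)
{
	time_t secs;
	struct timeval now;
	struct ub_event* ev;
	/* sigs=1: let the backend set up signal handling where supported */
	struct ub_event_base* base = ub_default_event_base(1, &secs, &now);
	if(!base)
		return -1;
	ev = ub_event_new(base, sock, UB_EV_READ | UB_EV_PERSIST,
		on_readable, NULL);
	if(!ev || ub_event_add(ev, NULL) != 0) {
		ub_event_free(ev);	/* safe on NULL */
		ub_event_base_free(base);
		return -1;
	}
	(void)ub_event_base_dispatch(base);	/* runs until loopexit */
	ub_event_del(ev);
	ub_event_free(ev);
	ub_event_base_free(base);
	return 0;
}

An embedding application could instead wrap an existing libevent loop with ub_libevent_event_base(), or supply its own ub_event_base_vmt; either way the ub_event_* wrappers above dispatch through the vmt after checking the UB_EVENT_MAGIC tag.
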
edns.ext_rcode = 0;
edns.edns_version = 0;
edns.bits = EDNS_DO;
+ edns.opt_list = NULL;
if(sldns_buffer_capacity(buf) < 65535)
edns.udp_size = (uint16_t)sldns_buffer_capacity(buf);
else edns.udp_size = 65535;
")", b);
(void)rbtree_delete(anchors->tree, &ta->node);
lock_basic_unlock(&ta->lock);
+ if(anchors->dlv_anchor == ta)
+ anchors->dlv_anchor = NULL;
anchors_delfunc(&ta->node, NULL);
ta = next;
continue;
}
}
+void
+secalgo_hash_sha256(unsigned char* buf, size_t len, unsigned char* res)
+{
+ (void)SHA256(buf, len, res);
+}
+
/**
* Return size of DS digest according to its hash algorithm.
* @param algo: DS digest algo.
case LDNS_RSAMD5:
/* RFC 6725 deprecates RSAMD5 */
return 0;
+#ifdef USE_DSA
case LDNS_DSA:
case LDNS_DSA_NSEC3:
+#endif
case LDNS_RSASHA1:
case LDNS_RSASHA1_NSEC3:
#if defined(HAVE_EVP_SHA256) && defined(USE_SHA2)
log_err("%s crypto %s", str, buf);
}
+#ifdef USE_DSA
/**
* Setup DSA key digest in DER encoding ...
* @param sig: input is signature output alloced ptr (unless failure).
DSA_SIG_free(dsasig);
return 1;
}
+#endif /* USE_DSA */
#ifdef USE_ECDSA
/**
static int
setup_ecdsa_sig(unsigned char** sig, unsigned int* len)
{
- ECDSA_SIG* ecdsa_sig;
- int newlen;
+	/* convert from two BIGNUMs in the rdata buffer to ASN.1 (DER) notation.
+	 * ASN.1 preamble: 30440220 <R, 32 bytes for P-256> 0220 <S, 32 bytes for P-256>
+	 * the '20' is the length of that field (=bnsize).
+	 * the '44' is the total remaining length.
+	 * if the top bit is set (the number would read as negative), prepend a zero byte.
+	 * if the number starts with 00 bytes, remove them from the number.
+	 */
+ uint8_t pre[] = {0x30, 0x44, 0x02, 0x20};
+ int pre_len = 4;
+ uint8_t mid[] = {0x02, 0x20};
+ int mid_len = 2;
+ int raw_sig_len, r_high, s_high, r_rem=0, s_rem=0;
int bnsize = (int)((*len)/2);
+ unsigned char* d = *sig;
+ uint8_t* p;
/* if too short or not even length, fails */
if(*len < 16 || bnsize*2 != (int)*len)
return 0;
- /* use the raw data to parse two evenly long BIGNUMs, "r | s". */
- ecdsa_sig = ECDSA_SIG_new();
- if(!ecdsa_sig) return 0;
- ecdsa_sig->r = BN_bin2bn(*sig, bnsize, ecdsa_sig->r);
- ecdsa_sig->s = BN_bin2bn(*sig+bnsize, bnsize, ecdsa_sig->s);
- if(!ecdsa_sig->r || !ecdsa_sig->s) {
- ECDSA_SIG_free(ecdsa_sig);
- return 0;
- }
- /* spool it into ASN format */
- *sig = NULL;
- newlen = i2d_ECDSA_SIG(ecdsa_sig, sig);
- if(newlen <= 0) {
- ECDSA_SIG_free(ecdsa_sig);
- free(*sig);
+ /* strip leading zeroes from r (but not last one) */
+ while(r_rem < bnsize-1 && d[r_rem] == 0)
+ r_rem++;
+ /* strip leading zeroes from s (but not last one) */
+ while(s_rem < bnsize-1 && d[bnsize+s_rem] == 0)
+ s_rem++;
+
+ r_high = ((d[0+r_rem]&0x80)?1:0);
+ s_high = ((d[bnsize+s_rem]&0x80)?1:0);
+ raw_sig_len = pre_len + r_high + bnsize - r_rem + mid_len +
+ s_high + bnsize - s_rem;
+ *sig = (unsigned char*)malloc((size_t)raw_sig_len);
+ if(!*sig)
return 0;
+ p = (uint8_t*)*sig;
+ p[0] = pre[0];
+ p[1] = (uint8_t)(raw_sig_len-2);
+ p[2] = pre[2];
+ p[3] = (uint8_t)(bnsize + r_high - r_rem);
+ p += 4;
+ if(r_high) {
+ *p = 0;
+ p += 1;
}
- *len = (unsigned int)newlen;
- ECDSA_SIG_free(ecdsa_sig);
+ memmove(p, d+r_rem, (size_t)bnsize-r_rem);
+ p += bnsize-r_rem;
+	memmove(p, mid, (size_t)mid_len-1); /* copies only the 0x02 tag; the length byte is written explicitly below */
+ p += mid_len-1;
+ *p = (uint8_t)(bnsize + s_high - s_rem);
+ p += 1;
+ if(s_high) {
+ *p = 0;
+ p += 1;
+ }
+ memmove(p, d+bnsize+s_rem, (size_t)bnsize-s_rem);
+ *len = (unsigned int)raw_sig_len;
return 1;
}
#endif /* USE_ECDSA */
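
A quick, illustrative check of the length arithmetic used in the rewritten setup_ecdsa_sig() above; this is not part of the patch, and der_ecdsa_sig_size() is a hypothetical helper mirroring the raw_sig_len computation.

#include <assert.h>
#include <stddef.h>

/* DER size of an ECDSA signature rebuilt from a raw (r|s) pair of
 * 2*bnsize bytes: 4-byte preamble + r + 2-byte mid + s, plus one 0x00
 * pad per component whose top bit is set (leading-zero stripping,
 * which only shrinks the result, is ignored here). */
static size_t der_ecdsa_sig_size(size_t bnsize, size_t r_pad, size_t s_pad)
{
	return 4 + r_pad + bnsize + 2 + s_pad + bnsize;
}

int main(void)
{
	assert(der_ecdsa_sig_size(32, 0, 0) == 70);  /* P-256: 30 44 02 20 ... */
	assert(der_ecdsa_sig_size(32, 1, 0) == 71);  /* padded r: 30 45 02 21 00 ... */
	assert(der_ecdsa_sig_size(48, 0, 0) == 102); /* P-384: 30 64 02 30 ... */
	return 0;
}
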
setup_key_digest(int algo, EVP_PKEY** evp_key, const EVP_MD** digest_type,
unsigned char* key, size_t keylen)
{
+#ifdef USE_DSA
DSA* dsa;
+#endif
RSA* rsa;
switch(algo) {
+#ifdef USE_DSA
case LDNS_DSA:
case LDNS_DSA_NSEC3:
*evp_key = EVP_PKEY_new();
*digest_type = EVP_dss1();
break;
+#endif /* USE_DSA */
case LDNS_RSASHA1:
case LDNS_RSASHA1_NSEC3:
#if defined(HAVE_EVP_SHA256) && defined(USE_SHA2)
char** reason)
{
const EVP_MD *digest_type;
- EVP_MD_CTX ctx;
- int res, dofree = 0;
+ EVP_MD_CTX* ctx;
+ int res, dofree = 0, docrypto_free = 0;
EVP_PKEY *evp_key = NULL;
if(!setup_key_digest(algo, &evp_key, &digest_type, key, keylen)) {
EVP_PKEY_free(evp_key);
return sec_status_bogus;
}
+#ifdef USE_DSA
/* if it is a DSA signature in bind format, convert to DER format */
if((algo == LDNS_DSA || algo == LDNS_DSA_NSEC3) &&
sigblock_len == 1+2*SHA_DIGEST_LENGTH) {
EVP_PKEY_free(evp_key);
return sec_status_bogus;
}
- dofree = 1;
+ docrypto_free = 1;
}
+#endif
+#if defined(USE_ECDSA) && defined(USE_DSA)
+ else
+#endif
#ifdef USE_ECDSA
- else if(algo == LDNS_ECDSAP256SHA256 || algo == LDNS_ECDSAP384SHA384) {
+ if(algo == LDNS_ECDSAP256SHA256 || algo == LDNS_ECDSAP384SHA384) {
/* EVP uses ASN prefix on sig, which is not in the wire data */
if(!setup_ecdsa_sig(&sigblock, &sigblock_len)) {
verbose(VERB_QUERY, "verify: failed to setup ECDSA sig");
#endif /* USE_ECDSA */
/* do the signature cryptography work */
- EVP_MD_CTX_init(&ctx);
- if(EVP_VerifyInit(&ctx, digest_type) == 0) {
- verbose(VERB_QUERY, "verify: EVP_VerifyInit failed");
+#ifdef HAVE_EVP_MD_CTX_NEW
+ ctx = EVP_MD_CTX_new();
+#else
+ ctx = (EVP_MD_CTX*)malloc(sizeof(*ctx));
+ if(ctx) EVP_MD_CTX_init(ctx);
+#endif
+ if(!ctx) {
+ log_err("EVP_MD_CTX_new: malloc failure");
EVP_PKEY_free(evp_key);
if(dofree) free(sigblock);
+ else if(docrypto_free) CRYPTO_free(sigblock);
return sec_status_unchecked;
}
- if(EVP_VerifyUpdate(&ctx, (unsigned char*)sldns_buffer_begin(buf),
- (unsigned int)sldns_buffer_limit(buf)) == 0) {
- verbose(VERB_QUERY, "verify: EVP_VerifyUpdate failed");
+ if(EVP_VerifyInit(ctx, digest_type) == 0) {
+ verbose(VERB_QUERY, "verify: EVP_VerifyInit failed");
+ EVP_MD_CTX_destroy(ctx);
EVP_PKEY_free(evp_key);
if(dofree) free(sigblock);
+ else if(docrypto_free) CRYPTO_free(sigblock);
return sec_status_unchecked;
}
-
- res = EVP_VerifyFinal(&ctx, sigblock, sigblock_len, evp_key);
- if(EVP_MD_CTX_cleanup(&ctx) == 0) {
- verbose(VERB_QUERY, "verify: EVP_MD_CTX_cleanup failed");
+ if(EVP_VerifyUpdate(ctx, (unsigned char*)sldns_buffer_begin(buf),
+ (unsigned int)sldns_buffer_limit(buf)) == 0) {
+ verbose(VERB_QUERY, "verify: EVP_VerifyUpdate failed");
+ EVP_MD_CTX_destroy(ctx);
EVP_PKEY_free(evp_key);
if(dofree) free(sigblock);
+ else if(docrypto_free) CRYPTO_free(sigblock);
return sec_status_unchecked;
}
+
+ res = EVP_VerifyFinal(ctx, sigblock, sigblock_len, evp_key);
+#ifdef HAVE_EVP_MD_CTX_NEW
+ EVP_MD_CTX_destroy(ctx);
+#else
+ EVP_MD_CTX_cleanup(ctx);
+ free(ctx);
+#endif
EVP_PKEY_free(evp_key);
- if(dofree)
- free(sigblock);
+ if(dofree) free(sigblock);
+ else if(docrypto_free) CRYPTO_free(sigblock);
if(res == 1) {
return sec_status_secure;
}
}
+void
+secalgo_hash_sha256(unsigned char* buf, size_t len, unsigned char* res)
+{
+ (void)HASH_HashBuf(HASH_AlgSHA256, res, buf, (unsigned long)len);
+}
+
size_t
ds_digest_size_supported(int algo)
{
case LDNS_RSAMD5:
/* RFC 6725 deprecates RSAMD5 */
return 0;
+#ifdef USE_DSA
case LDNS_DSA:
case LDNS_DSA_NSEC3:
+#endif
case LDNS_RSASHA1:
case LDNS_RSASHA1_NSEC3:
#ifdef USE_SHA2
*/
switch(algo) {
+#ifdef USE_DSA
case LDNS_DSA:
case LDNS_DSA_NSEC3:
*pubkey = nss_buf2dsa(key, keylen);
*htype = HASH_AlgSHA1;
/* no prefix for DSA verification */
break;
+#endif
case LDNS_RSASHA1:
case LDNS_RSASHA1_NSEC3:
#ifdef USE_SHA2
return sec_status_bogus;
}
+#ifdef USE_DSA
/* need to convert DSA, ECDSA signatures? */
if((algo == LDNS_DSA || algo == LDNS_DSA_NSEC3)) {
if(sigblock_len == 1+2*SHA1_LENGTH) {
SECITEM_FreeItem(p, PR_TRUE);
}
}
+#endif /* USE_DSA */
/* do the signature cryptography work */
/* hash the data */
}
}
+void
+secalgo_hash_sha256(unsigned char* buf, size_t len, unsigned char* res)
+{
+ _digest_nettle(SHA256_DIGEST_SIZE, (uint8_t*)buf, len, res);
+}
+
/**
* Return size of DS digest according to its hash algorithm.
* @param algo: DS digest algo.
{
/* uses libnettle */
switch(id) {
+#ifdef USE_DSA
case LDNS_DSA:
case LDNS_DSA_NSEC3:
+#endif
case LDNS_RSASHA1:
case LDNS_RSASHA1_NSEC3:
#ifdef USE_SHA2
}
switch(algo) {
+#ifdef USE_DSA
case LDNS_DSA:
case LDNS_DSA_NSEC3:
*reason = _verify_nettle_dsa(buf, sigblock, sigblock_len, key, keylen);
return sec_status_bogus;
else
return sec_status_secure;
+#endif /* USE_DSA */
case LDNS_RSASHA1:
case LDNS_RSASHA1_NSEC3:
int secalgo_nsec3_hash(int algo, unsigned char* buf, size_t len,
unsigned char* res);
+/**
+ * Calculate the sha256 hash for the data buffer into the result.
+ * @param buf: buffer to digest.
+ * @param len: length of the buffer to digest.
+ * @param res: result is stored here (space 256/8 bytes).
+ */
+void secalgo_hash_sha256(unsigned char* buf, size_t len, unsigned char* res);
+
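
The secalgo_hash_sha256() declaration added above can be exercised with a few lines. A minimal sketch, not part of the patch, assuming this header is validator/val_secalgo.h as the surrounding declarations suggest:

#include <stdio.h>
#include <string.h>
#include "validator/val_secalgo.h"

int main(void)
{
	unsigned char data[] = "example";
	unsigned char digest[256/8];	/* 32 bytes, as documented above */
	size_t i;
	secalgo_hash_sha256(data, sizeof(data)-1, digest);
	for(i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}
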
/**
* Return size of DS digest according to its hash algorithm.
* @param algo: DS digest algo.