-/* $OpenBSD: uipc_socket.c,v 1.328 2024/04/10 12:04:41 mvs Exp $ */
+/* $OpenBSD: uipc_socket.c,v 1.329 2024/04/11 13:32:51 mvs Exp $ */
/* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */
/*
void soput(void *);
int somove(struct socket *, int);
void sorflush(struct socket *);
+void sorflush_locked(struct socket *);
void filt_sordetach(struct knote *kn);
int filt_soread(struct knote *kn, long hint);
return (NULL);
rw_init_flags(&so->so_lock, dp->dom_name, RWL_DUPOK);
refcnt_init(&so->so_refcnt);
+ rw_init(&so->so_rcv.sb_lock, "sbufrcv");
+ rw_init(&so->so_snd.sb_lock, "sbufsnd");
mtx_init(&so->so_rcv.sb_mtx, IPL_MPFLOOR);
mtx_init(&so->so_snd.sb_mtx, IPL_MPFLOOR);
klist_init_mutex(&so->so_rcv.sb_klist, &so->so_rcv.sb_mtx);
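Each socket buffer now carries two locks: sb_lock, a long-term rwlock that an SB_OWNLOCK buffer uses as its sblock() backing, and sb_mtx, a short-term mutex around the actual buffer manipulation. Below is a minimal sketch of how the two are meant to be paired, assuming only the primitives initialized above; sb_consume_example() is hypothetical and not part of this diff.

/*
 * Minimal sketch, not part of the diff: pairing the long-term
 * sb_lock with the short-term sb_mtx on an SB_OWNLOCK buffer.
 * sb_consume_example() is a hypothetical helper.
 */
int
sb_consume_example(struct socket *so, struct sockbuf *sb)
{
	int error;

	/* Long-term owner lock; may sleep, interruptible by signals. */
	error = rw_enter(&sb->sb_lock, RW_WRITE | RW_INTR);
	if (error)
		return (error);

	/* Short-term data lock around the actual buffer access. */
	mtx_enter(&sb->sb_mtx);
	/* ... inspect or dequeue sb->sb_mb here ... */
	mtx_leave(&sb->sb_mtx);

	rw_exit(&sb->sb_lock);
	return (0);
}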
case AF_INET6:
switch (prp->pr_type) {
case SOCK_DGRAM:
- so->so_rcv.sb_flags |= SB_OWNLOCK;
- /* FALLTHROUGH */
- case SOCK_RAW:
so->so_rcv.sb_flags |= SB_MTXLOCK;
break;
+ case SOCK_RAW:
+ so->so_rcv.sb_flags |= SB_MTXLOCK | SB_OWNLOCK;
+ break;
}
break;
case AF_UNIX:
- so->so_rcv.sb_flags |= SB_MTXLOCK | SB_OWNLOCK;
+ so->so_rcv.sb_flags |= SB_MTXLOCK;
break;
}
}
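With the hunk above, inet/inet6 SOCK_DGRAM and unix(4) receive buffers are marked SB_MTXLOCK only, while inet/inet6 SOCK_RAW buffers are additionally marked SB_OWNLOCK. The rest of the diff keys off SB_OWNLOCK to decide whether solock() is still required, e.g. the `dosolock' variable in soreceive(). A hypothetical helper that encodes the same rule, for illustration only:

/*
 * Hypothetical helper, not in the tree: the rule that soreceive(),
 * sorflush() and the sockbuf option path below apply inline.
 */
static inline int
sb_needs_solock(const struct sockbuf *sb)
{
	/* SB_OWNLOCK buffers carry their own long-term lock. */
	return ((sb->sb_flags & SB_OWNLOCK) == 0);
}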
#endif /* SOCKET_SPLICE */
sbrelease(so, &so->so_snd);
- sorflush(so);
- if (!keep_lock)
+
+	/*
+	 * Despite the '_locked' suffix, solock() must be released before
+	 * calling sorflush_locked() on an SB_OWNLOCK marked socket.  We
+	 * can't just release solock() and call sorflush() instead, because
+	 * releasing solock() is unwanted for tcp(4) sockets.
+	 */
+
+ if (so->so_rcv.sb_flags & SB_OWNLOCK)
+ sounlock(so);
+
+ sorflush_locked(so);
+
+ if (!((so->so_rcv.sb_flags & SB_OWNLOCK) || keep_lock))
sounlock(so);
+
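The comment above boils down to an ordering rule: for an SB_OWNLOCK buffer, never sleep on its sblock() while holding solock(). A condensed, hypothetical sketch of the tail of sofree() under that rule (assumes solock() is held on entry):

/*
 * Hypothetical condensation of the rule above, for illustration
 * only; solock() is held on entry.
 */
void
sofree_flush_example(struct socket *so, int keep_lock)
{
	int ownlock = (so->so_rcv.sb_flags & SB_OWNLOCK);

	if (ownlock)
		sounlock(so);	/* don't sleep on sblock() under solock() */

	sorflush_locked(so);	/* takes sblock(), may sleep */

	if (!ownlock && !keep_lock)
		sounlock(so);
}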
#ifdef SOCKET_SPLICE
if (so->so_sp) {
/* Reuse splice idle, sounsplice() has been called before. */
const struct protosw *pr = so->so_proto;
struct mbuf *nextrecord;
size_t resid, orig_resid = uio->uio_resid;
+ int dosolock = ((so->so_rcv.sb_flags & SB_OWNLOCK) == 0);
mp = mp0;
if (paddr)
if (mp)
*mp = NULL;
- solock_shared(so);
+ if (dosolock)
+ solock_shared(so);
restart:
- if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0) {
- sounlock_shared(so);
- return (error);
- }
+ if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0)
+ goto out;
sb_mtx_lock(&so->so_rcv);
m = so->so_rcv.sb_mb;
SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
- if (so->so_rcv.sb_flags & SB_OWNLOCK) {
+ if (so->so_rcv.sb_flags & (SB_MTXLOCK | SB_OWNLOCK)) {
sbunlock_locked(so, &so->so_rcv);
- sounlock_shared(so);
+ if (dosolock)
+ sounlock_shared(so);
error = sbwait_locked(so, &so->so_rcv);
sb_mtx_unlock(&so->so_rcv);
if (error)
return (error);
- solock_shared(so);
+ if (dosolock)
+ solock_shared(so);
} else {
sb_mtx_unlock(&so->so_rcv);
sbunlock(so, &so->so_rcv);
if (controlp) {
if (pr->pr_domain->dom_externalize) {
sb_mtx_unlock(&so->so_rcv);
- sounlock_shared(so);
+ if (dosolock)
+ sounlock_shared(so);
error =
(*pr->pr_domain->dom_externalize)
(cm, controllen, flags);
- solock_shared(so);
+ if (dosolock)
+ solock_shared(so);
sb_mtx_lock(&so->so_rcv);
}
*controlp = cm;
SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
resid = uio->uio_resid;
sb_mtx_unlock(&so->so_rcv);
- sounlock_shared(so);
+ if (dosolock)
+ sounlock_shared(so);
uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio);
- solock_shared(so);
+ if (dosolock)
+ solock_shared(so);
sb_mtx_lock(&so->so_rcv);
if (uio_error)
uio->uio_resid = resid - len;
break;
SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
- sb_mtx_unlock(&so->so_rcv);
- error = sbwait(so, &so->so_rcv);
- if (error) {
- sbunlock(so, &so->so_rcv);
- sounlock_shared(so);
- return (0);
+ if (dosolock) {
+ sb_mtx_unlock(&so->so_rcv);
+ error = sbwait(so, &so->so_rcv);
+ if (error) {
+ sbunlock(so, &so->so_rcv);
+ sounlock_shared(so);
+ return (0);
+ }
+ sb_mtx_lock(&so->so_rcv);
+ } else {
+ if (sbwait_locked(so, &so->so_rcv)) {
+ sb_mtx_unlock(&so->so_rcv);
+ sbunlock(so, &so->so_rcv);
+ return (0);
+ }
}
- sb_mtx_lock(&so->so_rcv);
if ((m = so->so_rcv.sb_mb) != NULL)
nextrecord = m->m_nextpkt;
}
release:
sb_mtx_unlock(&so->so_rcv);
sbunlock(so, &so->so_rcv);
- sounlock_shared(so);
+out:
+ if (dosolock)
+ sounlock_shared(so);
return (error);
}
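Every copyout and externalize point in soreceive() above follows the same shape: drop sb_mtx, release solock() only if it was taken (dosolock), do the work that may sleep or touch userland, then reacquire in the reverse order. A stripped-down, hypothetical sketch of that shape:

/*
 * Hypothetical sketch of the locking shape used around uiomove()
 * and dom_externalize() in soreceive() above; `work' stands in for
 * the sleeping operation.
 */
static int
soreceive_unlock_example(struct socket *so, int dosolock,
    int (*work)(struct socket *))
{
	int error;

	sb_mtx_unlock(&so->so_rcv);
	if (dosolock)
		sounlock_shared(so);

	error = (*work)(so);		/* may sleep or copy to userland */

	if (dosolock)
		solock_shared(so);
	sb_mtx_lock(&so->so_rcv);

	return (error);
}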
{
int error = 0;
- solock(so);
switch (how) {
case SHUT_RD:
sorflush(so);
sorflush(so);
/* FALLTHROUGH */
case SHUT_WR:
+ solock(so);
error = pru_shutdown(so);
+ sounlock(so);
break;
default:
error = EINVAL;
break;
}
- sounlock(so);
return (error);
}
void
-sorflush(struct socket *so)
+sorflush_locked(struct socket *so)
{
struct sockbuf *sb = &so->so_rcv;
struct mbuf *m;
const struct protosw *pr = so->so_proto;
int error;
+ if ((sb->sb_flags & SB_OWNLOCK) == 0)
+ soassertlocked(so);
+
error = sblock(so, sb, SBL_WAIT | SBL_NOINTR);
/* with SBL_WAIT and SBL_NOINTR sblock() must not fail */
KASSERT(error == 0);
m_purge(m);
}
+void
+sorflush(struct socket *so)
+{
+ if ((so->so_rcv.sb_flags & SB_OWNLOCK) == 0)
+ solock_shared(so);
+ sorflush_locked(so);
+ if ((so->so_rcv.sb_flags & SB_OWNLOCK) == 0)
+ sounlock_shared(so);
+}
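sorflush() is now a thin wrapper: callers that hold no socket locks use it and let it take solock_shared() when the buffer still needs it, while callers that already manage solock() themselves, such as sofree() above, call sorflush_locked() directly. A hypothetical caller-side illustration, not part of the diff:

/*
 * Hypothetical caller-side illustration of the two entry points.
 */
void
sorflush_usage_example(struct socket *so, int have_solock)
{
	if (!have_solock) {
		/* e.g. soshutdown(SHUT_RD): the wrapper handles locking */
		sorflush(so);
	} else {
		/* e.g. a non-SB_OWNLOCK buffer with solock() held */
		soassertlocked(so);
		sorflush_locked(so);
	}
}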
+
#ifdef SOCKET_SPLICE
#define so_splicelen so_sp->ssp_len
if ((long)cnt <= 0)
cnt = 1;
- solock(so);
+	if ((sb->sb_flags & SB_OWNLOCK) == 0)
+ solock(so);
mtx_enter(&sb->sb_mtx);
switch (optname) {
}
mtx_leave(&sb->sb_mtx);
- sounlock(so);
+	if ((sb->sb_flags & SB_OWNLOCK) == 0)
+ sounlock(so);
break;
}
-/* $OpenBSD: uipc_socket2.c,v 1.148 2024/04/10 12:04:41 mvs Exp $ */
+/* $OpenBSD: uipc_socket2.c,v 1.149 2024/04/11 13:32:51 mvs Exp $ */
/* $NetBSD: uipc_socket2.c,v 1.11 1996/02/04 02:17:55 christos Exp $ */
/*
void
socantrcvmore(struct socket *so)
{
- soassertlocked(so);
+ if ((so->so_rcv.sb_flags & SB_OWNLOCK) == 0)
+ soassertlocked(so);
+
mtx_enter(&so->so_rcv.sb_mtx);
so->so_rcv.sb_state |= SS_CANTRCVMORE;
mtx_leave(&so->so_rcv.sb_mtx);
{
int error = 0, prio = PSOCK;
+ if (sb->sb_flags & SB_OWNLOCK) {
+ int rwflags = RW_WRITE;
+
+ if (!(flags & SBL_NOINTR || sb->sb_flags & SB_NOINTR))
+ rwflags |= RW_INTR;
+ if (!(flags & SBL_WAIT))
+ rwflags |= RW_NOSLEEP;
+
+ return rw_enter(&sb->sb_lock, rwflags);
+ }
+
soassertlocked(so);
mtx_enter(&sb->sb_mtx);
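For SB_OWNLOCK buffers the SBL_* flags passed to sblock() above translate directly into rw_enter(9) flags. The resulting combinations, spelled out as a hypothetical reference derived from the code above (not part of the diff):

/*
 * Hypothetical reference for the SBL_* -> rw_enter(9) mapping above,
 * for illustration only:
 *
 *   SBL_WAIT | SBL_NOINTR -> rw_enter(&sb->sb_lock, RW_WRITE)
 *                            sleep until acquired, not interruptible
 *                            (the sorflush() case)
 *   SBL_WAIT              -> rw_enter(&sb->sb_lock, RW_WRITE | RW_INTR)
 *                            sleep, but signals abort the wait
 *   0                     -> rw_enter(&sb->sb_lock,
 *                                RW_WRITE | RW_INTR | RW_NOSLEEP)
 *                            fail immediately if the lock is busy
 *
 * SB_NOINTR on the buffer has the same effect as passing SBL_NOINTR.
 */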
void
sbunlock_locked(struct socket *so, struct sockbuf *sb)
{
+ if (sb->sb_flags & SB_OWNLOCK) {
+ rw_exit(&sb->sb_lock);
+ return;
+ }
+
MUTEX_ASSERT_LOCKED(&sb->sb_mtx);
sb->sb_flags &= ~SB_LOCK;
void
sbunlock(struct socket *so, struct sockbuf *sb)
{
+ if (sb->sb_flags & SB_OWNLOCK) {
+ rw_exit(&sb->sb_lock);
+ return;
+ }
+
mtx_enter(&sb->sb_mtx);
sbunlock_locked(so, sb);
mtx_leave(&sb->sb_mtx);