Update to FreeBSD stable/12 2019-07-09
Git mirror commit 3427c3416aa3c0f25124070959cca78024b94d85.
parent 312f705d4f
commit 5283630d2c
@@ -1 +1 @@
-Subproject commit aa83598855d14cdbf7aef6b05d0617e90f87ca2f
+Subproject commit 3427c3416aa3c0f25124070959cca78024b94d85
@@ -180,6 +180,7 @@ typedef enum {
PJT_MUTEX_CONSISTENT,
PJT_MUTEXATTR_GETROBUST,
PJT_MUTEXATTR_SETROBUST,
PJT_GETTHREADID_NP,
PJT_MAX
} pjt_index_t;
@@ -136,6 +136,35 @@ caph_enter(void)
return (0);
}

static __inline int
caph_rights_limit(int fd, const cap_rights_t *rights)
{

if (cap_rights_limit(fd, rights) < 0 && errno != ENOSYS)
return (-1);

return (0);
}

static __inline int
caph_ioctls_limit(int fd, const unsigned long *cmds, size_t ncmds)
{

if (cap_ioctls_limit(fd, cmds, ncmds) < 0 && errno != ENOSYS)
return (-1);

return (0);
}

static __inline int
caph_fcntls_limit(int fd, uint32_t fcntlrights)
{

if (cap_fcntls_limit(fd, fcntlrights) < 0 && errno != ENOSYS)
return (-1);

return (0);
}

static __inline int
caph_enter_casper(void)
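Note: the new caph_rights_limit(), caph_ioctls_limit() and caph_fcntls_limit() wrappers follow the same convention as caph_enter() above: an ENOSYS error is ignored so binaries keep working on kernels built without Capsicum support. A minimal usage sketch, not taken from this commit (the file name and rights set are illustrative only):

#include <sys/capsicum.h>
#include <capsicum_helpers.h>
#include <err.h>
#include <fcntl.h>

int
main(void)
{
	cap_rights_t rights;
	int fd;

	fd = open("/etc/motd", O_RDONLY);	/* hypothetical descriptor */
	if (fd < 0)
		err(1, "open");

	/* Limit the descriptor to read-only rights; ENOSYS is tolerated. */
	cap_rights_init(&rights, CAP_READ, CAP_FSTAT);
	if (caph_rights_limit(fd, &rights) < 0)
		err(1, "caph_rights_limit");

	/* Enter capability mode only after all setup is complete. */
	if (caph_enter() < 0)
		err(1, "caph_enter");

	return (0);
}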
@@ -898,6 +898,5 @@ do_packet(struct interface_info *interface, struct dhcp_packet *packet,

/* Free the data associated with the options. */
for (i = 0; i < 256; i++)
if (tp.options[i].len && tp.options[i].data)
free(tp.options[i].data);
}
@@ -185,7 +185,7 @@ decode_udp_ip_header(unsigned char *buf, int bufix, struct sockaddr_in *from,
ip_packets_seen++;
if (wrapsum(checksum(buf + bufix, ip_len, 0)) != 0) {
ip_packets_bad_checksum++;
if (ip_packets_seen > 4 &&
if (ip_packets_seen > 4 && ip_packets_bad_checksum != 0 &&
(ip_packets_seen / ip_packets_bad_checksum) < 2) {
note("%d bad IP checksums seen in %d packets",
ip_packets_bad_checksum, ip_packets_seen);
@@ -237,7 +237,7 @@ decode_udp_ip_header(unsigned char *buf, int bufix, struct sockaddr_in *from,
udp_packets_seen++;
if (usum && usum != sum) {
udp_packets_bad_checksum++;
if (udp_packets_seen > 4 &&
if (udp_packets_seen > 4 && udp_packets_bad_checksum != 0 &&
(udp_packets_seen / udp_packets_bad_checksum) < 2) {
note("%d bad udp checksums in %d packets",
udp_packets_bad_checksum, udp_packets_seen);
@@ -5584,6 +5584,7 @@ scsi_devid_is_naa_ieee_reg(uint8_t *bufp)
{
struct scsi_vpd_id_descriptor *descr;
struct scsi_vpd_id_naa_basic *naa;
int n;

descr = (struct scsi_vpd_id_descriptor *)bufp;
naa = (struct scsi_vpd_id_naa_basic *)descr->identifier;
@@ -5591,7 +5592,8 @@ scsi_devid_is_naa_ieee_reg(uint8_t *bufp)
return 0;
if (descr->length < sizeof(struct scsi_vpd_id_naa_ieee_reg))
return 0;
if ((naa->naa >> SVPD_ID_NAA_NAA_SHIFT) != SVPD_ID_NAA_IEEE_REG)
n = naa->naa >> SVPD_ID_NAA_NAA_SHIFT;
if (n != SVPD_ID_NAA_LOCAL_REG && n != SVPD_ID_NAA_IEEE_REG)
return 0;
return 1;
}
@@ -858,6 +858,7 @@ pci_bar_mmap(device_t pcidev, struct pci_bar_mmap *pbm)
struct thread *td;
struct sglist *sg;
struct pci_map *pm;
vm_paddr_t membase;
vm_paddr_t pbase;
vm_size_t plen;
vm_offset_t addr;
@@ -880,8 +881,9 @@ pci_bar_mmap(device_t pcidev, struct pci_bar_mmap *pbm)
return (EBUSY); /* XXXKIB enable if _ACTIVATE */
if (!PCI_BAR_MEM(pm->pm_value))
return (EIO);
pbase = trunc_page(pm->pm_value);
plen = round_page(pm->pm_value + ((pci_addr_t)1 << pm->pm_size)) -
membase = pm->pm_value & PCIM_BAR_MEM_BASE;
pbase = trunc_page(membase);
plen = round_page(membase + ((pci_addr_t)1 << pm->pm_size)) -
pbase;
prot = VM_PROT_READ | (((pbm->pbm_flags & PCIIO_BAR_MMAP_RW) != 0) ?
VM_PROT_WRITE : 0);
@@ -913,7 +915,7 @@ pci_bar_mmap(device_t pcidev, struct pci_bar_mmap *pbm)
}
pbm->pbm_map_base = (void *)addr;
pbm->pbm_map_length = plen;
pbm->pbm_bar_off = pm->pm_value - pbase;
pbm->pbm_bar_off = membase - pbase;
pbm->pbm_bar_length = (pci_addr_t)1 << pm->pm_size;

out:
@@ -138,7 +138,9 @@ int
_sleep(void *ident, struct lock_object *lock, int priority,
const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
#ifndef __rtems__
struct thread *td;
#endif /* __rtems__ */
struct lock_class *class;
uintptr_t lock_state;
#ifndef __rtems__
@@ -148,7 +150,9 @@ _sleep(void *ident, struct lock_object *lock, int priority,
#endif /* __rtems__ */
WITNESS_SAVE_DECL(lock_witness);

#ifndef __rtems__
td = curthread;
#endif /* __rtems__ */
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0, wmesg);
@@ -402,6 +406,23 @@ wakeup_one(void *ident)
kick_proc0();
}

void
wakeup_any(void *ident)
#ifndef __rtems__
{
int wakeup_swapper;

sleepq_lock(ident);
wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR,
0, 0);
sleepq_release(ident);
if (wakeup_swapper)
kick_proc0();
}
#else /* __rtems__ */
RTEMS_ALIAS(_bsd_wakeup_one);
#endif /* __rtems__ */

#ifndef __rtems__
static void
kdb_switch(void)
@@ -301,7 +301,7 @@ sbuf_printf_uuid(struct sbuf *sb, struct uuid *uuid)
char buf[38];

snprintf_uuid(buf, sizeof(buf), uuid);
return (sbuf_printf(sb, "%s", buf));
return (sbuf_cat(sb, buf));
}

/*
@@ -343,6 +343,21 @@ sbuf_setpos(struct sbuf *s, ssize_t pos)
return (0);
}

/*
* Drain into a counter. Counts amount of data without producing output.
* Useful for cases like sysctl, where user may first request only size.
* This allows to avoid pointless allocation/freeing of large buffers.
*/
int
sbuf_count_drain(void *arg, const char *data __unused, int len)
{
size_t *sizep;

sizep = (size_t *)arg;
*sizep += len;
return (len);
}

/*
* Set up a drain function and argument on an sbuf to flush data to
* when the sbuf buffer overflows.
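Note: sbuf_count_drain() is meant to be installed with sbuf_set_drain(9); rendering into such an sbuf yields only the total output length, which suits two-pass sysctl handlers that answer a size probe before building the real buffer. A hedged kernel-side sketch, with the function name and the formatted lines invented for illustration:

static size_t
report_size(void)
{
	struct sbuf sb;
	size_t size = 0;

	/* Small fixed buffer; overflow is flushed into the counter. */
	(void)sbuf_new(&sb, NULL, 128, SBUF_FIXEDLEN);
	sbuf_set_drain(&sb, sbuf_count_drain, &size);
	sbuf_printf(&sb, "example line 1\n");
	sbuf_printf(&sb, "example line 2\n");
	sbuf_finish(&sb);	/* drains any buffered remainder */
	sbuf_delete(&sb);
	return (size);
}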
@@ -132,7 +132,7 @@ CTASSERT(powerof2(SC_TABLESIZE));
* c - sleep queue chain lock
*/
struct sleepqueue {
TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
u_int sq_blockedcnt[NR_SLEEPQS]; /* (c) N. of blocked threads. */
LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */
LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */
@@ -1139,13 +1139,15 @@ sleepq_init(void *mem, int size, int flags)
}

/*
* Find the highest priority thread sleeping on a wait channel and resume it.
* Find thread sleeping on a wait channel and resume it.
*/
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
struct sleepqueue_chain *sc;
struct sleepqueue *sq;
#ifndef __rtems__
struct threadqueue *head;
struct thread *td, *besttd;
#else /* __rtems__ */
struct thread *besttd;
@@ -1162,17 +1164,34 @@ sleepq_signal(void *wchan, int flags, int pri, int queue)
("%s: mismatch between sleep/wakeup and cv_*", __func__));

#ifndef __rtems__
head = &sq->sq_blocked[queue];
if (flags & SLEEPQ_UNFAIR) {
/*
* Find the highest priority thread on the queue. If there is a
* tie, use the thread that first appears in the queue as it has
* been sleeping the longest since threads are always added to
* the tail of sleep queues.
* Find the most recently sleeping thread, but try to
* skip threads still in process of context switch to
* avoid spinning on the thread lock.
*/
besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
sc = SC_LOOKUP(wchan);
besttd = TAILQ_LAST_FAST(head, thread, td_slpq);
while (besttd->td_lock != &sc->sc_lock) {
td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq);
if (td == NULL)
break;
besttd = td;
}
} else {
/*
* Find the highest priority thread on the queue. If there
* is a tie, use the thread that first appears in the queue
* as it has been sleeping the longest since threads are
* always added to the tail of sleep queues.
*/
besttd = td = TAILQ_FIRST(head);
while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) {
if (td->td_priority < besttd->td_priority)
besttd = td;
}
}
#else /* __rtems__ */
besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
#endif /* __rtems__ */
@@ -841,7 +841,7 @@ taskqueue_thread_enqueue(void *context)

tqp = context;
tq = *tqp;
wakeup_one(tq);
wakeup_any(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
@@ -817,7 +817,7 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
rpipe->pipe_map.pos += size;
rpipe->pipe_map.cnt -= size;
if (rpipe->pipe_map.cnt == 0) {
rpipe->pipe_state &= ~(PIPE_DIRECTW|PIPE_WANTW);
rpipe->pipe_state &= ~PIPE_WANTW;
wakeup(rpipe);
}
#endif
@@ -1020,13 +1020,17 @@ pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio)
}

/*
* unmap and unwire the process buffer
* Unwire the process buffer.
*/
static void
pipe_destroy_write_buffer(struct pipe *wpipe)
{

PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
KASSERT((wpipe->pipe_state & PIPE_DIRECTW) != 0,
("%s: PIPE_DIRECTW not set on %p", __func__, wpipe));

wpipe->pipe_state &= ~PIPE_DIRECTW;
vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
wpipe->pipe_map.npages = 0;
}
@@ -1045,13 +1049,15 @@ pipe_clone_write_buffer(struct pipe *wpipe)
int pos;

PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
KASSERT((wpipe->pipe_state & PIPE_DIRECTW) != 0,
("%s: PIPE_DIRECTW not set on %p", __func__, wpipe));

size = wpipe->pipe_map.cnt;
pos = wpipe->pipe_map.pos;

wpipe->pipe_buffer.in = size;
wpipe->pipe_buffer.out = 0;
wpipe->pipe_buffer.cnt = size;
wpipe->pipe_state &= ~PIPE_DIRECTW;

PIPE_UNLOCK(wpipe);
iov.iov_base = wpipe->pipe_buffer.buffer;
@@ -1090,7 +1096,7 @@ retry:
pipeunlock(wpipe);
goto error1;
}
while (wpipe->pipe_state & PIPE_DIRECTW) {
if (wpipe->pipe_state & PIPE_DIRECTW) {
if (wpipe->pipe_state & PIPE_WANTR) {
wpipe->pipe_state &= ~PIPE_WANTR;
wakeup(wpipe);
@@ -1133,8 +1139,7 @@ retry:
goto error1;
}

error = 0;
while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
while (wpipe->pipe_map.cnt != 0) {
if (wpipe->pipe_state & PIPE_EOF) {
pipe_destroy_write_buffer(wpipe);
pipeselwakeup(wpipe);
@@ -1152,20 +1157,19 @@ retry:
error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
"pipdwt", 0);
pipelock(wpipe, 0);
if (error != 0)
break;
}

if (wpipe->pipe_state & PIPE_EOF)
error = EPIPE;
if (wpipe->pipe_state & PIPE_DIRECTW) {
/*
* this bit of trickery substitutes a kernel buffer for
* the process that might be going away.
*/
if (error == EINTR || error == ERESTART)
pipe_clone_write_buffer(wpipe);
} else {
else
pipe_destroy_write_buffer(wpipe);
}
pipeunlock(wpipe);
KASSERT((wpipe->pipe_state & PIPE_DIRECTW) == 0,
("pipe %p leaked PIPE_DIRECTW", wpipe));
return (error);

error1:
@@ -233,9 +233,6 @@ ttydev_leave(struct tty *tp)

tp->t_flags |= TF_OPENCLOSE;

/* Stop asynchronous I/O. */
funsetown(&tp->t_sigio);

#ifndef __rtems__
/* Remove console TTY. */
if (constty == tp)
@@ -1138,6 +1135,9 @@ tty_rel_free(struct tty *tp)
return;
}

/* Stop asynchronous I/O. */
funsetown(&tp->t_sigio);

/* TTY can be deallocated. */
dev = tp->t_dev;
tp->t_dev = NULL;
|
||||
/* Prevent other readers from entering the socket. */
|
||||
error = sblock(sb, SBLOCKWAIT(flags));
|
||||
if (error)
|
||||
goto out;
|
||||
return (error);
|
||||
SOCKBUF_LOCK(sb);
|
||||
|
||||
/* Easy one, no space to copyout anything. */
|
||||
|
@ -1109,6 +1109,15 @@ if_detach_internal(struct ifnet *ifp, int vmove, struct if_clone **ifcp)
|
||||
curvnet->vnet_ifcnt--;
|
||||
#endif
|
||||
epoch_wait_preempt(net_epoch_preempt);
|
||||
|
||||
/*
|
||||
* Ensure all pending EPOCH(9) callbacks have been executed. This
|
||||
* fixes issues about late destruction of multicast options
|
||||
* which lead to leave group calls, which in turn access the
|
||||
* belonging ifnet structure:
|
||||
*/
|
||||
epoch_drain_callbacks(net_epoch_preempt);
|
||||
|
||||
/*
|
||||
* In any case (destroy or vmove) detach us from the groups
|
||||
* and remove/wait for pending events on the taskq.
|
||||
|
@ -96,7 +96,9 @@ static MALLOC_DEFINE(M_IPMSOURCE, "ip_msource",
|
||||
|
||||
/*
|
||||
* Locking:
|
||||
* - Lock order is: Giant, INP_WLOCK, IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
|
||||
*
|
||||
* - Lock order is: Giant, IN_MULTI_LOCK, INP_WLOCK,
|
||||
* IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
|
||||
* - The IF_ADDR_LOCK is implicitly taken by inm_lookup() earlier, however
|
||||
* it can be taken by code in net/if.c also.
|
||||
* - ip_moptions and in_mfilter are covered by the INP_WLOCK.
|
||||
@ -146,12 +148,11 @@ static int imf_prune(struct in_mfilter *, const struct sockaddr_in *);
|
||||
static void imf_purge(struct in_mfilter *);
|
||||
static void imf_rollback(struct in_mfilter *);
|
||||
static void imf_reap(struct in_mfilter *);
|
||||
static int imo_grow(struct ip_moptions *);
|
||||
static size_t imo_match_group(const struct ip_moptions *,
|
||||
static struct in_mfilter *
|
||||
imo_match_group(const struct ip_moptions *,
|
||||
const struct ifnet *, const struct sockaddr *);
|
||||
static struct in_msource *
|
||||
imo_match_source(const struct ip_moptions *, const size_t,
|
||||
const struct sockaddr *);
|
||||
imo_match_source(struct in_mfilter *, const struct sockaddr *);
|
||||
static void ims_merge(struct ip_msource *ims,
|
||||
const struct in_msource *lims, const int rollback);
|
||||
static int in_getmulti(struct ifnet *, const struct in_addr *,
|
||||
@ -335,6 +336,26 @@ imf_init(struct in_mfilter *imf, const int st0, const int st1)
|
||||
imf->imf_st[1] = st1;
|
||||
}
|
||||
|
||||
struct in_mfilter *
|
||||
ip_mfilter_alloc(const int mflags, const int st0, const int st1)
|
||||
{
|
||||
struct in_mfilter *imf;
|
||||
|
||||
imf = malloc(sizeof(*imf), M_INMFILTER, mflags);
|
||||
if (imf != NULL)
|
||||
imf_init(imf, st0, st1);
|
||||
|
||||
return (imf);
|
||||
}
|
||||
|
||||
void
|
||||
ip_mfilter_free(struct in_mfilter *imf)
|
||||
{
|
||||
|
||||
imf_purge(imf);
|
||||
free(imf, M_INMFILTER);
|
||||
}
|
||||
|
||||
/*
|
||||
* Function for looking up an in_multi record for an IPv4 multicast address
|
||||
* on a given interface. ifp must be valid. If no record found, return NULL.
|
||||
@ -379,90 +400,31 @@ inm_lookup(struct ifnet *ifp, const struct in_addr ina)
|
||||
return (inm);
|
||||
}
|
||||
|
||||
/*
|
||||
* Resize the ip_moptions vector to the next power-of-two minus 1.
|
||||
* May be called with locks held; do not sleep.
|
||||
*/
|
||||
static int
|
||||
imo_grow(struct ip_moptions *imo)
|
||||
{
|
||||
struct in_multi **nmships;
|
||||
struct in_multi **omships;
|
||||
struct in_mfilter *nmfilters;
|
||||
struct in_mfilter *omfilters;
|
||||
size_t idx;
|
||||
size_t newmax;
|
||||
size_t oldmax;
|
||||
|
||||
nmships = NULL;
|
||||
nmfilters = NULL;
|
||||
omships = imo->imo_membership;
|
||||
omfilters = imo->imo_mfilters;
|
||||
oldmax = imo->imo_max_memberships;
|
||||
newmax = ((oldmax + 1) * 2) - 1;
|
||||
|
||||
if (newmax <= IP_MAX_MEMBERSHIPS) {
|
||||
nmships = (struct in_multi **)realloc(omships,
|
||||
sizeof(struct in_multi *) * newmax, M_IPMOPTS, M_NOWAIT);
|
||||
nmfilters = (struct in_mfilter *)realloc(omfilters,
|
||||
sizeof(struct in_mfilter) * newmax, M_INMFILTER, M_NOWAIT);
|
||||
if (nmships != NULL && nmfilters != NULL) {
|
||||
/* Initialize newly allocated source filter heads. */
|
||||
for (idx = oldmax; idx < newmax; idx++) {
|
||||
imf_init(&nmfilters[idx], MCAST_UNDEFINED,
|
||||
MCAST_EXCLUDE);
|
||||
}
|
||||
imo->imo_max_memberships = newmax;
|
||||
imo->imo_membership = nmships;
|
||||
imo->imo_mfilters = nmfilters;
|
||||
}
|
||||
}
|
||||
|
||||
if (nmships == NULL || nmfilters == NULL) {
|
||||
if (nmships != NULL)
|
||||
free(nmships, M_IPMOPTS);
|
||||
if (nmfilters != NULL)
|
||||
free(nmfilters, M_INMFILTER);
|
||||
return (ETOOMANYREFS);
|
||||
}
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Find an IPv4 multicast group entry for this ip_moptions instance
|
||||
* which matches the specified group, and optionally an interface.
|
||||
* Return its index into the array, or -1 if not found.
|
||||
*/
|
||||
static size_t
|
||||
static struct in_mfilter *
|
||||
imo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp,
|
||||
const struct sockaddr *group)
|
||||
{
|
||||
const struct sockaddr_in *gsin;
|
||||
struct in_multi **pinm;
|
||||
int idx;
|
||||
int nmships;
|
||||
struct in_mfilter *imf;
|
||||
struct in_multi *inm;
|
||||
|
||||
gsin = (const struct sockaddr_in *)group;
|
||||
|
||||
/* The imo_membership array may be lazy allocated. */
|
||||
if (imo->imo_membership == NULL || imo->imo_num_memberships == 0)
|
||||
return (-1);
|
||||
|
||||
nmships = imo->imo_num_memberships;
|
||||
pinm = &imo->imo_membership[0];
|
||||
for (idx = 0; idx < nmships; idx++, pinm++) {
|
||||
if (*pinm == NULL)
|
||||
IP_MFILTER_FOREACH(imf, &imo->imo_head) {
|
||||
inm = imf->imf_inm;
|
||||
if (inm == NULL)
|
||||
continue;
|
||||
if ((ifp == NULL || ((*pinm)->inm_ifp == ifp)) &&
|
||||
in_hosteq((*pinm)->inm_addr, gsin->sin_addr)) {
|
||||
if ((ifp == NULL || (inm->inm_ifp == ifp)) &&
|
||||
in_hosteq(inm->inm_addr, gsin->sin_addr)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (idx >= nmships)
|
||||
idx = -1;
|
||||
|
||||
return (idx);
|
||||
return (imf);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -473,22 +435,13 @@ imo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp,
|
||||
* it exists, which may not be the desired behaviour.
|
||||
*/
|
||||
static struct in_msource *
|
||||
imo_match_source(const struct ip_moptions *imo, const size_t gidx,
|
||||
const struct sockaddr *src)
|
||||
imo_match_source(struct in_mfilter *imf, const struct sockaddr *src)
|
||||
{
|
||||
struct ip_msource find;
|
||||
struct in_mfilter *imf;
|
||||
struct ip_msource *ims;
|
||||
const sockunion_t *psa;
|
||||
|
||||
KASSERT(src->sa_family == AF_INET, ("%s: !AF_INET", __func__));
|
||||
KASSERT(gidx != -1 && gidx < imo->imo_num_memberships,
|
||||
("%s: invalid index %d\n", __func__, (int)gidx));
|
||||
|
||||
/* The imo_mfilters array may be lazy allocated. */
|
||||
if (imo->imo_mfilters == NULL)
|
||||
return (NULL);
|
||||
imf = &imo->imo_mfilters[gidx];
|
||||
|
||||
/* Source trees are keyed in host byte order. */
|
||||
psa = (const sockunion_t *)src;
|
||||
@ -508,14 +461,14 @@ int
|
||||
imo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp,
|
||||
const struct sockaddr *group, const struct sockaddr *src)
|
||||
{
|
||||
size_t gidx;
|
||||
struct in_mfilter *imf;
|
||||
struct in_msource *ims;
|
||||
int mode;
|
||||
|
||||
KASSERT(ifp != NULL, ("%s: null ifp", __func__));
|
||||
|
||||
gidx = imo_match_group(imo, ifp, group);
|
||||
if (gidx == -1)
|
||||
imf = imo_match_group(imo, ifp, group);
|
||||
if (imf == NULL)
|
||||
return (MCAST_NOTGMEMBER);
|
||||
|
||||
/*
|
||||
@ -527,8 +480,8 @@ imo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp,
|
||||
* NOTE: We are comparing group state here at IGMP t1 (now)
|
||||
* with socket-layer t0 (since last downcall).
|
||||
*/
|
||||
mode = imo->imo_mfilters[gidx].imf_st[1];
|
||||
ims = imo_match_source(imo, gidx, src);
|
||||
mode = imf->imf_st[1];
|
||||
ims = imo_match_source(imf, src);
|
||||
|
||||
if ((ims == NULL && mode == MCAST_INCLUDE) ||
|
||||
(ims != NULL && ims->imsl_st[0] != mode))
|
||||
@ -1453,7 +1406,6 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
|
||||
struct ip_moptions *imo;
|
||||
struct in_msource *ims;
|
||||
struct in_multi *inm;
|
||||
size_t idx;
|
||||
uint16_t fmode;
|
||||
int error, doblock;
|
||||
|
||||
@ -1532,20 +1484,18 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
|
||||
if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
|
||||
return (EINVAL);
|
||||
|
||||
IN_MULTI_LOCK();
|
||||
|
||||
/*
|
||||
* Check if we are actually a member of this group.
|
||||
*/
|
||||
imo = inp_findmoptions(inp);
|
||||
idx = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (idx == -1 || imo->imo_mfilters == NULL) {
|
||||
imf = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (imf == NULL) {
|
||||
error = EADDRNOTAVAIL;
|
||||
goto out_inp_locked;
|
||||
}
|
||||
|
||||
KASSERT(imo->imo_mfilters != NULL,
|
||||
("%s: imo_mfilters not allocated", __func__));
|
||||
imf = &imo->imo_mfilters[idx];
|
||||
inm = imo->imo_membership[idx];
|
||||
inm = imf->imf_inm;
|
||||
|
||||
/*
|
||||
* Attempting to use the delta-based API on an
|
||||
@ -1563,7 +1513,7 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
|
||||
* Asked to unblock, but nothing to unblock.
|
||||
* If adding a new block entry, allocate it.
|
||||
*/
|
||||
ims = imo_match_source(imo, idx, &ssa->sa);
|
||||
ims = imo_match_source(imf, &ssa->sa);
|
||||
if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
|
||||
CTR3(KTR_IGMPV3, "%s: source 0x%08x %spresent", __func__,
|
||||
ntohl(ssa->sin.sin_addr.s_addr), doblock ? "" : "not ");
|
||||
@ -1594,14 +1544,13 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
|
||||
/*
|
||||
* Begin state merge transaction at IGMP layer.
|
||||
*/
|
||||
IN_MULTI_LOCK();
|
||||
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
|
||||
IN_MULTI_LIST_LOCK();
|
||||
error = inm_merge(inm, imf);
|
||||
if (error) {
|
||||
CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
|
||||
IN_MULTI_LIST_UNLOCK();
|
||||
goto out_in_multi_locked;
|
||||
goto out_imf_rollback;
|
||||
}
|
||||
|
||||
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
|
||||
@ -1610,9 +1559,6 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
|
||||
if (error)
|
||||
CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
|
||||
|
||||
out_in_multi_locked:
|
||||
|
||||
IN_MULTI_UNLOCK();
|
||||
out_imf_rollback:
|
||||
if (error)
|
||||
imf_rollback(imf);
|
||||
@ -1623,6 +1569,7 @@ out_imf_rollback:
|
||||
|
||||
out_inp_locked:
|
||||
INP_WUNLOCK(inp);
|
||||
IN_MULTI_UNLOCK();
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -1637,9 +1584,6 @@ static struct ip_moptions *
|
||||
inp_findmoptions(struct inpcb *inp)
|
||||
{
|
||||
struct ip_moptions *imo;
|
||||
struct in_multi **immp;
|
||||
struct in_mfilter *imfp;
|
||||
size_t idx;
|
||||
|
||||
INP_WLOCK(inp);
|
||||
if (inp->inp_moptions != NULL)
|
||||
@ -1648,29 +1592,16 @@ inp_findmoptions(struct inpcb *inp)
|
||||
INP_WUNLOCK(inp);
|
||||
|
||||
imo = malloc(sizeof(*imo), M_IPMOPTS, M_WAITOK);
|
||||
immp = malloc(sizeof(*immp) * IP_MIN_MEMBERSHIPS, M_IPMOPTS,
|
||||
M_WAITOK | M_ZERO);
|
||||
imfp = malloc(sizeof(struct in_mfilter) * IP_MIN_MEMBERSHIPS,
|
||||
M_INMFILTER, M_WAITOK);
|
||||
|
||||
imo->imo_multicast_ifp = NULL;
|
||||
imo->imo_multicast_addr.s_addr = INADDR_ANY;
|
||||
imo->imo_multicast_vif = -1;
|
||||
imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
|
||||
imo->imo_multicast_loop = in_mcast_loop;
|
||||
imo->imo_num_memberships = 0;
|
||||
imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
|
||||
imo->imo_membership = immp;
|
||||
|
||||
/* Initialize per-group source filters. */
|
||||
for (idx = 0; idx < IP_MIN_MEMBERSHIPS; idx++)
|
||||
imf_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE);
|
||||
imo->imo_mfilters = imfp;
|
||||
STAILQ_INIT(&imo->imo_head);
|
||||
|
||||
INP_WLOCK(inp);
|
||||
if (inp->inp_moptions != NULL) {
|
||||
free(imfp, M_INMFILTER);
|
||||
free(immp, M_IPMOPTS);
|
||||
free(imo, M_IPMOPTS);
|
||||
return (inp->inp_moptions);
|
||||
}
|
||||
@ -1684,29 +1615,22 @@ inp_gcmoptions(struct ip_moptions *imo)
|
||||
struct in_mfilter *imf;
|
||||
struct in_multi *inm;
|
||||
struct ifnet *ifp;
|
||||
size_t idx, nmships;
|
||||
|
||||
nmships = imo->imo_num_memberships;
|
||||
for (idx = 0; idx < nmships; ++idx) {
|
||||
imf = imo->imo_mfilters ? &imo->imo_mfilters[idx] : NULL;
|
||||
if (imf)
|
||||
while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
|
||||
ip_mfilter_remove(&imo->imo_head, imf);
|
||||
|
||||
imf_leave(imf);
|
||||
inm = imo->imo_membership[idx];
|
||||
ifp = inm->inm_ifp;
|
||||
if (ifp != NULL) {
|
||||
if ((inm = imf->imf_inm) != NULL) {
|
||||
if ((ifp = inm->inm_ifp) != NULL) {
|
||||
CURVNET_SET(ifp->if_vnet);
|
||||
(void)in_leavegroup(inm, imf);
|
||||
CURVNET_RESTORE();
|
||||
} else {
|
||||
(void)in_leavegroup(inm, imf);
|
||||
}
|
||||
if (imf)
|
||||
imf_purge(imf);
|
||||
}
|
||||
|
||||
if (imo->imo_mfilters)
|
||||
free(imo->imo_mfilters, M_INMFILTER);
|
||||
free(imo->imo_membership, M_IPMOPTS);
|
||||
ip_mfilter_free(imf);
|
||||
}
|
||||
free(imo, M_IPMOPTS);
|
||||
}
|
||||
|
||||
@ -1742,7 +1666,7 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
|
||||
struct sockaddr_storage *ptss;
|
||||
struct sockaddr_storage *tss;
|
||||
int error;
|
||||
size_t idx, nsrcs, ncsrcs;
|
||||
size_t nsrcs, ncsrcs;
|
||||
|
||||
INP_WLOCK_ASSERT(inp);
|
||||
|
||||
@ -1769,12 +1693,11 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
|
||||
* Lookup group on the socket.
|
||||
*/
|
||||
gsa = (sockunion_t *)&msfr.msfr_group;
|
||||
idx = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (idx == -1 || imo->imo_mfilters == NULL) {
|
||||
imf = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (imf == NULL) {
|
||||
INP_WUNLOCK(inp);
|
||||
return (EADDRNOTAVAIL);
|
||||
}
|
||||
imf = &imo->imo_mfilters[idx];
|
||||
|
||||
/*
|
||||
* Ignore memberships which are in limbo.
|
||||
@ -2032,14 +1955,11 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
struct ip_moptions *imo;
|
||||
struct in_multi *inm;
|
||||
struct in_msource *lims;
|
||||
size_t idx;
|
||||
int error, is_new;
|
||||
|
||||
ifp = NULL;
|
||||
imf = NULL;
|
||||
lims = NULL;
|
||||
error = 0;
|
||||
is_new = 0;
|
||||
|
||||
memset(&gsr, 0, sizeof(struct group_source_req));
|
||||
gsa = (sockunion_t *)&gsr.gsr_group;
|
||||
@ -2147,13 +2067,25 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0)
|
||||
return (EADDRNOTAVAIL);
|
||||
|
||||
IN_MULTI_LOCK();
|
||||
|
||||
/*
|
||||
* Find the membership in the membership list.
|
||||
*/
|
||||
imo = inp_findmoptions(inp);
|
||||
idx = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (idx == -1) {
|
||||
imf = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (imf == NULL) {
|
||||
is_new = 1;
|
||||
inm = NULL;
|
||||
|
||||
if (ip_mfilter_count(&imo->imo_head) >= IP_MAX_MEMBERSHIPS) {
|
||||
error = ENOMEM;
|
||||
goto out_inp_locked;
|
||||
}
|
||||
} else {
|
||||
inm = imo->imo_membership[idx];
|
||||
imf = &imo->imo_mfilters[idx];
|
||||
is_new = 0;
|
||||
inm = imf->imf_inm;
|
||||
|
||||
if (ssa->ss.ss_family != AF_UNSPEC) {
|
||||
/*
|
||||
* MCAST_JOIN_SOURCE_GROUP on an exclusive membership
|
||||
@ -2180,7 +2112,7 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
* full-state SSM API with the delta-based API,
|
||||
* which is discouraged in the relevant RFCs.
|
||||
*/
|
||||
lims = imo_match_source(imo, idx, &ssa->sa);
|
||||
lims = imo_match_source(imf, &ssa->sa);
|
||||
if (lims != NULL /*&&
|
||||
lims->imsl_st[1] == MCAST_INCLUDE*/) {
|
||||
error = EADDRNOTAVAIL;
|
||||
@ -2213,27 +2145,6 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
*/
|
||||
INP_WLOCK_ASSERT(inp);
|
||||
|
||||
if (is_new) {
|
||||
if (imo->imo_num_memberships == imo->imo_max_memberships) {
|
||||
error = imo_grow(imo);
|
||||
if (error)
|
||||
goto out_inp_locked;
|
||||
}
|
||||
/*
|
||||
* Allocate the new slot upfront so we can deal with
|
||||
* grafting the new source filter in same code path
|
||||
* as for join-source on existing membership.
|
||||
*/
|
||||
idx = imo->imo_num_memberships;
|
||||
imo->imo_membership[idx] = NULL;
|
||||
imo->imo_num_memberships++;
|
||||
KASSERT(imo->imo_mfilters != NULL,
|
||||
("%s: imf_mfilters vector was not allocated", __func__));
|
||||
imf = &imo->imo_mfilters[idx];
|
||||
KASSERT(RB_EMPTY(&imf->imf_sources),
|
||||
("%s: imf_sources not empty", __func__));
|
||||
}
|
||||
|
||||
/*
|
||||
* Graft new source into filter list for this inpcb's
|
||||
* membership of the group. The in_multi may not have
|
||||
@ -2249,7 +2160,11 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
/* Membership starts in IN mode */
|
||||
if (is_new) {
|
||||
CTR1(KTR_IGMPV3, "%s: new join w/source", __func__);
|
||||
imf_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE);
|
||||
imf = ip_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE);
|
||||
if (imf == NULL) {
|
||||
error = ENOMEM;
|
||||
goto out_inp_locked;
|
||||
}
|
||||
} else {
|
||||
CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow");
|
||||
}
|
||||
@ -2258,34 +2173,41 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
CTR1(KTR_IGMPV3, "%s: merge imf state failed",
|
||||
__func__);
|
||||
error = ENOMEM;
|
||||
goto out_imo_free;
|
||||
goto out_inp_locked;
|
||||
}
|
||||
} else {
|
||||
/* No address specified; Membership starts in EX mode */
|
||||
if (is_new) {
|
||||
CTR1(KTR_IGMPV3, "%s: new join w/o source", __func__);
|
||||
imf_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE);
|
||||
imf = ip_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE);
|
||||
if (imf == NULL) {
|
||||
error = ENOMEM;
|
||||
goto out_inp_locked;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Begin state merge transaction at IGMP layer.
|
||||
*/
|
||||
if (is_new) {
|
||||
in_pcbref(inp);
|
||||
INP_WUNLOCK(inp);
|
||||
IN_MULTI_LOCK();
|
||||
|
||||
if (is_new) {
|
||||
error = in_joingroup_locked(ifp, &gsa->sin.sin_addr, imf,
|
||||
&inm);
|
||||
&imf->imf_inm);
|
||||
|
||||
INP_WLOCK(inp);
|
||||
if (in_pcbrele_wlocked(inp)) {
|
||||
error = ENXIO;
|
||||
goto out_inp_unlocked;
|
||||
}
|
||||
if (error) {
|
||||
CTR1(KTR_IGMPV3, "%s: in_joingroup_locked failed",
|
||||
__func__);
|
||||
IN_MULTI_LIST_UNLOCK();
|
||||
goto out_imo_free;
|
||||
goto out_inp_locked;
|
||||
}
|
||||
inm_acquire(inm);
|
||||
imo->imo_membership[idx] = inm;
|
||||
inm_acquire(imf->imf_inm);
|
||||
} else {
|
||||
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
|
||||
IN_MULTI_LIST_LOCK();
|
||||
@ -2294,7 +2216,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
|
||||
__func__);
|
||||
IN_MULTI_LIST_UNLOCK();
|
||||
goto out_in_multi_locked;
|
||||
imf_rollback(imf);
|
||||
imf_reap(imf);
|
||||
goto out_inp_locked;
|
||||
}
|
||||
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
|
||||
error = igmp_change_state(inm);
|
||||
@ -2302,40 +2226,30 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
if (error) {
|
||||
CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
|
||||
__func__);
|
||||
goto out_in_multi_locked;
|
||||
}
|
||||
}
|
||||
|
||||
out_in_multi_locked:
|
||||
|
||||
IN_MULTI_UNLOCK();
|
||||
INP_WLOCK(inp);
|
||||
if (in_pcbrele_wlocked(inp))
|
||||
return (ENXIO);
|
||||
if (error) {
|
||||
imf_rollback(imf);
|
||||
if (is_new)
|
||||
imf_purge(imf);
|
||||
else
|
||||
imf_reap(imf);
|
||||
} else {
|
||||
imf_commit(imf);
|
||||
goto out_inp_locked;
|
||||
}
|
||||
}
|
||||
if (is_new)
|
||||
ip_mfilter_insert(&imo->imo_head, imf);
|
||||
|
||||
out_imo_free:
|
||||
if (error && is_new) {
|
||||
inm = imo->imo_membership[idx];
|
||||
if (inm != NULL) {
|
||||
IN_MULTI_LIST_LOCK();
|
||||
inm_release_deferred(inm);
|
||||
IN_MULTI_LIST_UNLOCK();
|
||||
}
|
||||
imo->imo_membership[idx] = NULL;
|
||||
--imo->imo_num_memberships;
|
||||
}
|
||||
imf_commit(imf);
|
||||
imf = NULL;
|
||||
|
||||
out_inp_locked:
|
||||
INP_WUNLOCK(inp);
|
||||
out_inp_unlocked:
|
||||
IN_MULTI_UNLOCK();
|
||||
|
||||
if (is_new && imf) {
|
||||
if (imf->imf_inm != NULL) {
|
||||
IN_MULTI_LIST_LOCK();
|
||||
inm_release_deferred(imf->imf_inm);
|
||||
IN_MULTI_LIST_UNLOCK();
|
||||
}
|
||||
ip_mfilter_free(imf);
|
||||
}
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -2354,12 +2268,12 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
struct ip_moptions *imo;
|
||||
struct in_msource *ims;
|
||||
struct in_multi *inm;
|
||||
size_t idx;
|
||||
int error, is_final;
|
||||
int error;
|
||||
bool is_final;
|
||||
|
||||
ifp = NULL;
|
||||
error = 0;
|
||||
is_final = 1;
|
||||
is_final = true;
|
||||
|
||||
memset(&gsr, 0, sizeof(struct group_source_req));
|
||||
gsa = (sockunion_t *)&gsr.gsr_group;
|
||||
@ -2459,20 +2373,21 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
|
||||
return (EINVAL);
|
||||
|
||||
IN_MULTI_LOCK();
|
||||
|
||||
/*
|
||||
* Find the membership in the membership array.
|
||||
* Find the membership in the membership list.
|
||||
*/
|
||||
imo = inp_findmoptions(inp);
|
||||
idx = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (idx == -1) {
|
||||
imf = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (imf == NULL) {
|
||||
error = EADDRNOTAVAIL;
|
||||
goto out_inp_locked;
|
||||
}
|
||||
inm = imo->imo_membership[idx];
|
||||
imf = &imo->imo_mfilters[idx];
|
||||
inm = imf->imf_inm;
|
||||
|
||||
if (ssa->ss.ss_family != AF_UNSPEC)
|
||||
is_final = 0;
|
||||
is_final = false;
|
||||
|
||||
/*
|
||||
* Begin state merge transaction at socket layer.
|
||||
@ -2484,13 +2399,14 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
* MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
|
||||
*/
|
||||
if (is_final) {
|
||||
ip_mfilter_remove(&imo->imo_head, imf);
|
||||
imf_leave(imf);
|
||||
} else {
|
||||
if (imf->imf_st[0] == MCAST_EXCLUDE) {
|
||||
error = EADDRNOTAVAIL;
|
||||
goto out_inp_locked;
|
||||
}
|
||||
ims = imo_match_source(imo, idx, &ssa->sa);
|
||||
ims = imo_match_source(imf, &ssa->sa);
|
||||
if (ims == NULL) {
|
||||
CTR3(KTR_IGMPV3, "%s: source 0x%08x %spresent",
|
||||
__func__, ntohl(ssa->sin.sin_addr.s_addr), "not ");
|
||||
@ -2509,17 +2425,7 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
/*
|
||||
* Begin state merge transaction at IGMP layer.
|
||||
*/
|
||||
in_pcbref(inp);
|
||||
INP_WUNLOCK(inp);
|
||||
IN_MULTI_LOCK();
|
||||
|
||||
if (is_final) {
|
||||
/*
|
||||
* Give up the multicast address record to which
|
||||
* the membership points.
|
||||
*/
|
||||
(void)in_leavegroup_locked(inm, imf);
|
||||
} else {
|
||||
if (!is_final) {
|
||||
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
|
||||
IN_MULTI_LIST_LOCK();
|
||||
error = inm_merge(inm, imf);
|
||||
@ -2527,7 +2433,9 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
|
||||
__func__);
|
||||
IN_MULTI_LIST_UNLOCK();
|
||||
goto out_in_multi_locked;
|
||||
imf_rollback(imf);
|
||||
imf_reap(imf);
|
||||
goto out_inp_locked;
|
||||
}
|
||||
|
||||
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
|
||||
@ -2536,38 +2444,27 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
|
||||
if (error) {
|
||||
CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
|
||||
__func__);
|
||||
}
|
||||
}
|
||||
|
||||
out_in_multi_locked:
|
||||
|
||||
IN_MULTI_UNLOCK();
|
||||
INP_WLOCK(inp);
|
||||
if (in_pcbrele_wlocked(inp))
|
||||
return (ENXIO);
|
||||
|
||||
if (error)
|
||||
imf_rollback(imf);
|
||||
else
|
||||
imf_commit(imf);
|
||||
|
||||
imf_reap(imf);
|
||||
|
||||
if (is_final) {
|
||||
/* Remove the gap in the membership and filter array. */
|
||||
KASSERT(RB_EMPTY(&imf->imf_sources),
|
||||
("%s: imf_sources not empty", __func__));
|
||||
for (++idx; idx < imo->imo_num_memberships; ++idx) {
|
||||
imo->imo_membership[idx - 1] = imo->imo_membership[idx];
|
||||
imo->imo_mfilters[idx - 1] = imo->imo_mfilters[idx];
|
||||
goto out_inp_locked;
|
||||
}
|
||||
imf_init(&imo->imo_mfilters[idx - 1], MCAST_UNDEFINED,
|
||||
MCAST_EXCLUDE);
|
||||
imo->imo_num_memberships--;
|
||||
}
|
||||
imf_commit(imf);
|
||||
imf_reap(imf);
|
||||
|
||||
out_inp_locked:
|
||||
INP_WUNLOCK(inp);
|
||||
|
||||
if (is_final && imf) {
|
||||
/*
|
||||
* Give up the multicast address record to which
|
||||
* the membership points.
|
||||
*/
|
||||
(void) in_leavegroup_locked(imf->imf_inm, imf);
|
||||
ip_mfilter_free(imf);
|
||||
}
|
||||
|
||||
IN_MULTI_UNLOCK();
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -2657,7 +2554,6 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
|
||||
struct in_mfilter *imf;
|
||||
struct ip_moptions *imo;
|
||||
struct in_multi *inm;
|
||||
size_t idx;
|
||||
int error;
|
||||
|
||||
error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
|
||||
@ -2689,18 +2585,19 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
|
||||
if (ifp == NULL)
|
||||
return (EADDRNOTAVAIL);
|
||||
|
||||
IN_MULTI_LOCK();
|
||||
|
||||
/*
|
||||
* Take the INP write lock.
|
||||
* Check if this socket is a member of this group.
|
||||
*/
|
||||
imo = inp_findmoptions(inp);
|
||||
idx = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (idx == -1 || imo->imo_mfilters == NULL) {
|
||||
imf = imo_match_group(imo, ifp, &gsa->sa);
|
||||
if (imf == NULL) {
|
||||
error = EADDRNOTAVAIL;
|
||||
goto out_inp_locked;
|
||||
}
|
||||
inm = imo->imo_membership[idx];
|
||||
imf = &imo->imo_mfilters[idx];
|
||||
inm = imf->imf_inm;
|
||||
|
||||
/*
|
||||
* Begin state merge transaction at socket layer.
|
||||
@ -2777,7 +2674,6 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
|
||||
goto out_imf_rollback;
|
||||
|
||||
INP_WLOCK_ASSERT(inp);
|
||||
IN_MULTI_LOCK();
|
||||
|
||||
/*
|
||||
* Begin state merge transaction at IGMP layer.
|
||||
@ -2788,7 +2684,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
|
||||
if (error) {
|
||||
CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
|
||||
IN_MULTI_LIST_UNLOCK();
|
||||
goto out_in_multi_locked;
|
||||
goto out_imf_rollback;
|
||||
}
|
||||
|
||||
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
|
||||
@ -2797,10 +2693,6 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
|
||||
if (error)
|
||||
CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
|
||||
|
||||
out_in_multi_locked:
|
||||
|
||||
IN_MULTI_UNLOCK();
|
||||
|
||||
out_imf_rollback:
|
||||
if (error)
|
||||
imf_rollback(imf);
|
||||
@ -2811,6 +2703,7 @@ out_imf_rollback:
|
||||
|
||||
out_inp_locked:
|
||||
INP_WUNLOCK(inp);
|
||||
IN_MULTI_UNLOCK();
|
||||
return (error);
|
||||
}
|
||||
|
||||
|
@ -92,6 +92,9 @@ __FBSDID("$FreeBSD$");
|
||||
#if defined(INET) || defined(INET6)
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/in_pcb.h>
|
||||
#ifdef INET
|
||||
#include <netinet/in_var.h>
|
||||
#endif
|
||||
#include <netinet/ip_var.h>
|
||||
#include <netinet/tcp_var.h>
|
||||
#ifdef TCPHPTS
|
||||
@ -99,16 +102,13 @@ __FBSDID("$FreeBSD$");
|
||||
#endif
|
||||
#include <netinet/udp.h>
|
||||
#include <netinet/udp_var.h>
|
||||
#endif
|
||||
#ifdef INET
|
||||
#include <netinet/in_var.h>
|
||||
#endif
|
||||
#ifdef INET6
|
||||
#include <netinet/ip6.h>
|
||||
#include <netinet6/in6_pcb.h>
|
||||
#include <netinet6/in6_var.h>
|
||||
#include <netinet6/ip6_var.h>
|
||||
#endif /* INET6 */
|
||||
#endif
|
||||
|
||||
#include <netipsec/ipsec_support.h>
|
||||
|
||||
@ -1789,8 +1789,9 @@ void
|
||||
in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
|
||||
{
|
||||
struct inpcb *inp;
|
||||
struct in_multi *inm;
|
||||
struct in_mfilter *imf;
|
||||
struct ip_moptions *imo;
|
||||
int i, gap;
|
||||
|
||||
INP_INFO_WLOCK(pcbinfo);
|
||||
CK_LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
|
||||
@ -1811,17 +1812,18 @@ in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
|
||||
*
|
||||
* XXX This can all be deferred to an epoch_call
|
||||
*/
|
||||
for (i = 0, gap = 0; i < imo->imo_num_memberships;
|
||||
i++) {
|
||||
if (imo->imo_membership[i]->inm_ifp == ifp) {
|
||||
restart:
|
||||
IP_MFILTER_FOREACH(imf, &imo->imo_head) {
|
||||
if ((inm = imf->imf_inm) == NULL)
|
||||
continue;
|
||||
if (inm->inm_ifp != ifp)
|
||||
continue;
|
||||
ip_mfilter_remove(&imo->imo_head, imf);
|
||||
IN_MULTI_LOCK_ASSERT();
|
||||
in_leavegroup_locked(imo->imo_membership[i], NULL);
|
||||
gap++;
|
||||
} else if (gap != 0)
|
||||
imo->imo_membership[i - gap] =
|
||||
imo->imo_membership[i];
|
||||
in_leavegroup_locked(inm, NULL);
|
||||
ip_mfilter_free(imf);
|
||||
goto restart;
|
||||
}
|
||||
imo->imo_num_memberships -= gap;
|
||||
}
|
||||
INP_WUNLOCK(inp);
|
||||
}
|
||||
|
@ -232,8 +232,60 @@ struct in_mfilter {
|
||||
struct ip_msource_tree imf_sources; /* source list for (S,G) */
|
||||
u_long imf_nsrc; /* # of source entries */
|
||||
uint8_t imf_st[2]; /* state before/at commit */
|
||||
struct in_multi *imf_inm; /* associated multicast address */
|
||||
STAILQ_ENTRY(in_mfilter) imf_entry; /* list entry */
|
||||
};
|
||||
|
||||
/*
|
||||
* Helper types and functions for IPv4 multicast filters.
|
||||
*/
|
||||
STAILQ_HEAD(ip_mfilter_head, in_mfilter);
|
||||
|
||||
struct in_mfilter *ip_mfilter_alloc(int mflags, int st0, int st1);
|
||||
void ip_mfilter_free(struct in_mfilter *);
|
||||
|
||||
static inline void
|
||||
ip_mfilter_init(struct ip_mfilter_head *head)
|
||||
{
|
||||
|
||||
STAILQ_INIT(head);
|
||||
}
|
||||
|
||||
static inline struct in_mfilter *
|
||||
ip_mfilter_first(const struct ip_mfilter_head *head)
|
||||
{
|
||||
|
||||
return (STAILQ_FIRST(head));
|
||||
}
|
||||
|
||||
static inline void
|
||||
ip_mfilter_insert(struct ip_mfilter_head *head, struct in_mfilter *imf)
|
||||
{
|
||||
|
||||
STAILQ_INSERT_TAIL(head, imf, imf_entry);
|
||||
}
|
||||
|
||||
static inline void
|
||||
ip_mfilter_remove(struct ip_mfilter_head *head, struct in_mfilter *imf)
|
||||
{
|
||||
|
||||
STAILQ_REMOVE(head, imf, in_mfilter, imf_entry);
|
||||
}
|
||||
|
||||
#define IP_MFILTER_FOREACH(imf, head) \
|
||||
STAILQ_FOREACH(imf, head, imf_entry)
|
||||
|
||||
static inline size_t
|
||||
ip_mfilter_count(struct ip_mfilter_head *head)
|
||||
{
|
||||
struct in_mfilter *imf;
|
||||
size_t num = 0;
|
||||
|
||||
STAILQ_FOREACH(imf, head, imf_entry)
|
||||
num++;
|
||||
return (num);
|
||||
}
|
||||
|
||||
/*
|
||||
* IPv4 group descriptor.
|
||||
*
|
||||
|
@ -1369,25 +1369,24 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
|
||||
case AF_INET:
|
||||
{
|
||||
struct ip_moptions *imo = &cif->cif_imo;
|
||||
struct in_mfilter *imf;
|
||||
struct in_addr addr;
|
||||
|
||||
if (imo->imo_membership)
|
||||
if (ip_mfilter_first(&imo->imo_head) != NULL)
|
||||
return (0);
|
||||
|
||||
imo->imo_membership = (struct in_multi **)malloc(
|
||||
(sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_CARP,
|
||||
M_WAITOK);
|
||||
imo->imo_mfilters = NULL;
|
||||
imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
|
||||
imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
|
||||
ip_mfilter_init(&imo->imo_head);
|
||||
imo->imo_multicast_vif = -1;
|
||||
|
||||
addr.s_addr = htonl(INADDR_CARP_GROUP);
|
||||
if ((error = in_joingroup(ifp, &addr, NULL,
|
||||
&imo->imo_membership[0])) != 0) {
|
||||
free(imo->imo_membership, M_CARP);
|
||||
&imf->imf_inm)) != 0) {
|
||||
ip_mfilter_free(imf);
|
||||
break;
|
||||
}
|
||||
imo->imo_num_memberships++;
|
||||
|
||||
ip_mfilter_insert(&imo->imo_head, imf);
|
||||
imo->imo_multicast_ifp = ifp;
|
||||
imo->imo_multicast_ttl = CARP_DFLTTL;
|
||||
imo->imo_multicast_loop = 0;
|
||||
@ -1398,17 +1397,16 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
|
||||
case AF_INET6:
|
||||
{
|
||||
struct ip6_moptions *im6o = &cif->cif_im6o;
|
||||
struct in6_mfilter *im6f[2];
|
||||
struct in6_addr in6;
|
||||
struct in6_multi *in6m;
|
||||
|
||||
if (im6o->im6o_membership)
|
||||
if (ip6_mfilter_first(&im6o->im6o_head))
|
||||
return (0);
|
||||
|
||||
im6o->im6o_membership = (struct in6_multi **)malloc(
|
||||
(sizeof(struct in6_multi *) * IPV6_MIN_MEMBERSHIPS), M_CARP,
|
||||
M_ZERO | M_WAITOK);
|
||||
im6o->im6o_mfilters = NULL;
|
||||
im6o->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS;
|
||||
im6f[0] = ip6_mfilter_alloc(M_WAITOK, 0, 0);
|
||||
im6f[1] = ip6_mfilter_alloc(M_WAITOK, 0, 0);
|
||||
|
||||
ip6_mfilter_init(&im6o->im6o_head);
|
||||
im6o->im6o_multicast_hlim = CARP_DFLTTL;
|
||||
im6o->im6o_multicast_ifp = ifp;
|
||||
|
||||
@ -1417,17 +1415,15 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
|
||||
in6.s6_addr16[0] = htons(0xff02);
|
||||
in6.s6_addr8[15] = 0x12;
|
||||
if ((error = in6_setscope(&in6, ifp, NULL)) != 0) {
|
||||
free(im6o->im6o_membership, M_CARP);
|
||||
ip6_mfilter_free(im6f[0]);
|
||||
ip6_mfilter_free(im6f[1]);
|
||||
break;
|
||||
}
|
||||
in6m = NULL;
|
||||
if ((error = in6_joingroup(ifp, &in6, NULL, &in6m, 0)) != 0) {
|
||||
free(im6o->im6o_membership, M_CARP);
|
||||
if ((error = in6_joingroup(ifp, &in6, NULL, &im6f[0]->im6f_in6m, 0)) != 0) {
|
||||
ip6_mfilter_free(im6f[0]);
|
||||
ip6_mfilter_free(im6f[1]);
|
||||
break;
|
||||
}
|
||||
in6m_acquire(in6m);
|
||||
im6o->im6o_membership[0] = in6m;
|
||||
im6o->im6o_num_memberships++;
|
||||
|
||||
/* Join solicited multicast address. */
|
||||
bzero(&in6, sizeof(in6));
|
||||
@ -1436,20 +1432,21 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
|
||||
in6.s6_addr32[2] = htonl(1);
|
||||
in6.s6_addr32[3] = 0;
|
||||
in6.s6_addr8[12] = 0xff;
|
||||
|
||||
if ((error = in6_setscope(&in6, ifp, NULL)) != 0) {
|
||||
in6_leavegroup(im6o->im6o_membership[0], NULL);
|
||||
free(im6o->im6o_membership, M_CARP);
|
||||
ip6_mfilter_free(im6f[0]);
|
||||
ip6_mfilter_free(im6f[1]);
|
||||
break;
|
||||
}
|
||||
in6m = NULL;
|
||||
if ((error = in6_joingroup(ifp, &in6, NULL, &in6m, 0)) != 0) {
|
||||
in6_leavegroup(im6o->im6o_membership[0], NULL);
|
||||
free(im6o->im6o_membership, M_CARP);
|
||||
|
||||
if ((error = in6_joingroup(ifp, &in6, NULL, &im6f[1]->im6f_in6m, 0)) != 0) {
|
||||
in6_leavegroup(im6f[0]->im6f_in6m, NULL);
|
||||
ip6_mfilter_free(im6f[0]);
|
||||
ip6_mfilter_free(im6f[1]);
|
||||
break;
|
||||
}
|
||||
in6m_acquire(in6m);
|
||||
im6o->im6o_membership[1] = in6m;
|
||||
im6o->im6o_num_memberships++;
|
||||
ip6_mfilter_insert(&im6o->im6o_head, im6f[0]);
|
||||
ip6_mfilter_insert(&im6o->im6o_head, im6f[1]);
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
@ -1464,35 +1461,38 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
|
||||
static void
|
||||
carp_multicast_cleanup(struct carp_if *cif, sa_family_t sa)
|
||||
{
|
||||
|
||||
#ifdef INET
|
||||
struct ip_moptions *imo = &cif->cif_imo;
|
||||
struct in_mfilter *imf;
|
||||
#endif
|
||||
#ifdef INET6
|
||||
struct ip6_moptions *im6o = &cif->cif_im6o;
|
||||
struct in6_mfilter *im6f;
|
||||
#endif
|
||||
sx_assert(&carp_sx, SA_XLOCKED);
|
||||
|
||||
switch (sa) {
|
||||
#ifdef INET
|
||||
case AF_INET:
|
||||
if (cif->cif_naddrs == 0) {
|
||||
struct ip_moptions *imo = &cif->cif_imo;
|
||||
|
||||
in_leavegroup(imo->imo_membership[0], NULL);
|
||||
KASSERT(imo->imo_mfilters == NULL,
|
||||
("%s: imo_mfilters != NULL", __func__));
|
||||
free(imo->imo_membership, M_CARP);
|
||||
imo->imo_membership = NULL;
|
||||
if (cif->cif_naddrs != 0)
|
||||
break;
|
||||
|
||||
while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
|
||||
ip_mfilter_remove(&imo->imo_head, imf);
|
||||
in_leavegroup(imf->imf_inm, NULL);
|
||||
ip_mfilter_free(imf);
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
#ifdef INET6
|
||||
case AF_INET6:
|
||||
if (cif->cif_naddrs6 == 0) {
|
||||
struct ip6_moptions *im6o = &cif->cif_im6o;
|
||||
if (cif->cif_naddrs6 != 0)
|
||||
break;
|
||||
|
||||
in6_leavegroup(im6o->im6o_membership[0], NULL);
|
||||
in6_leavegroup(im6o->im6o_membership[1], NULL);
|
||||
KASSERT(im6o->im6o_mfilters == NULL,
|
||||
("%s: im6o_mfilters != NULL", __func__));
|
||||
free(im6o->im6o_membership, M_CARP);
|
||||
im6o->im6o_membership = NULL;
|
||||
while ((im6f = ip6_mfilter_first(&im6o->im6o_head)) != NULL) {
|
||||
ip6_mfilter_remove(&im6o->im6o_head, im6f);
|
||||
in6_leavegroup(im6f->im6f_in6m, NULL);
|
||||
ip6_mfilter_free(im6f);
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
|
@ -293,6 +293,7 @@ enum ipfw_opcodes { /* arguments (4 byte each) */
|
||||
O_EXTERNAL_DATA, /* variable length data */
|
||||
|
||||
O_SKIP_ACTION, /* none */
|
||||
O_TCPMSS, /* arg1=MSS value */
|
||||
|
||||
O_LAST_OPCODE /* not an opcode! */
|
||||
};
|
||||
|
@ -1680,7 +1680,6 @@ static void
|
||||
send_packet(struct vif *vifp, struct mbuf *m)
|
||||
{
|
||||
struct ip_moptions imo;
|
||||
struct in_multi *imm[2];
|
||||
int error __unused;
|
||||
|
||||
VIF_LOCK_ASSERT();
|
||||
@ -1689,9 +1688,7 @@ send_packet(struct vif *vifp, struct mbuf *m)
|
||||
imo.imo_multicast_ttl = mtod(m, struct ip *)->ip_ttl - 1;
|
||||
imo.imo_multicast_loop = 1;
|
||||
imo.imo_multicast_vif = -1;
|
||||
imo.imo_num_memberships = 0;
|
||||
imo.imo_max_memberships = 2;
|
||||
imo.imo_membership = &imm[0];
|
||||
STAILQ_INIT(&imo.imo_head);
|
||||
|
||||
/*
|
||||
* Re-entrancy should not be a problem here, because
|
||||
|
@ -82,6 +82,7 @@ struct ipoption {
|
||||
char ipopt_list[MAX_IPOPTLEN]; /* options proper */
|
||||
};
|
||||
|
||||
#if defined(_NETINET_IN_VAR_H_) && defined(_KERNEL)
|
||||
/*
|
||||
* Structure attached to inpcb.ip_moptions and
|
||||
* passed to ip_output when IP multicast options are in use.
|
||||
@ -93,12 +94,11 @@ struct ip_moptions {
|
||||
u_long imo_multicast_vif; /* vif num outgoing multicasts */
|
||||
u_char imo_multicast_ttl; /* TTL for outgoing multicasts */
|
||||
u_char imo_multicast_loop; /* 1 => hear sends if a member */
|
||||
u_short imo_num_memberships; /* no. memberships this socket */
|
||||
u_short imo_max_memberships; /* max memberships this socket */
|
||||
struct in_multi **imo_membership; /* group memberships */
|
||||
struct in_mfilter *imo_mfilters; /* source filters */
|
||||
struct epoch_context imo_epoch_ctx;
|
||||
struct ip_mfilter_head imo_head; /* group membership list */
|
||||
};
|
||||
#else
|
||||
struct ip_moptions;
|
||||
#endif
|
||||
|
||||
struct ipstat {
|
||||
uint64_t ips_total; /* total packets received */
|
||||
|
@ -782,9 +782,11 @@ _in6_ifdetach(struct ifnet *ifp, int purgeulp)
|
||||
in6_purgeaddr(ifa);
|
||||
}
|
||||
if (purgeulp) {
|
||||
IN6_MULTI_LOCK();
|
||||
in6_pcbpurgeif0(&V_udbinfo, ifp);
|
||||
in6_pcbpurgeif0(&V_ulitecbinfo, ifp);
|
||||
in6_pcbpurgeif0(&V_ripcbinfo, ifp);
|
||||
IN6_MULTI_UNLOCK();
|
||||
}
|
||||
/* leave from all multicast groups joined */
|
||||
in6_purgemaddrs(ifp);
|
||||
|
@ -104,7 +104,8 @@ RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp);
|
||||
|
||||
/*
|
||||
* Locking:
|
||||
* - Lock order is: Giant, INP_WLOCK, IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
|
||||
* - Lock order is: Giant, IN6_MULTI_LOCK, INP_WLOCK,
|
||||
* IN6_MULTI_LIST_LOCK, MLD_LOCK, IF_ADDR_LOCK.
|
||||
* - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however
|
||||
* it can be taken by code in net/if.c also.
|
||||
* - ip6_moptions and in6_mfilter are covered by the INP_WLOCK.
|
||||
@ -136,12 +137,11 @@ static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *);
|
||||
static void im6f_purge(struct in6_mfilter *);
|
||||
static void im6f_rollback(struct in6_mfilter *);
|
||||
static void im6f_reap(struct in6_mfilter *);
|
||||
static int im6o_grow(struct ip6_moptions *);
static size_t im6o_match_group(const struct ip6_moptions *,
static struct in6_mfilter *
im6o_match_group(const struct ip6_moptions *,
const struct ifnet *, const struct sockaddr *);
static struct in6_msource *
im6o_match_source(const struct ip6_moptions *, const size_t,
const struct sockaddr *);
im6o_match_source(struct in6_mfilter *, const struct sockaddr *);
static void im6s_merge(struct ip6_msource *ims,
const struct in6_msource *lims, const int rollback);
static int in6_getmulti(struct ifnet *, const struct in6_addr *,
@ -230,55 +230,25 @@ im6f_init(struct in6_mfilter *imf, const int st0, const int st1)
imf->im6f_st[1] = st1;
}

/*
* Resize the ip6_moptions vector to the next power-of-two minus 1.
* May be called with locks held; do not sleep.
*/
static int
im6o_grow(struct ip6_moptions *imo)
struct in6_mfilter *
ip6_mfilter_alloc(const int mflags, const int st0, const int st1)
{
struct in6_multi **nmships;
struct in6_multi **omships;
struct in6_mfilter *nmfilters;
struct in6_mfilter *omfilters;
size_t idx;
size_t newmax;
size_t oldmax;
struct in6_mfilter *imf;

nmships = NULL;
nmfilters = NULL;
omships = imo->im6o_membership;
omfilters = imo->im6o_mfilters;
oldmax = imo->im6o_max_memberships;
newmax = ((oldmax + 1) * 2) - 1;
imf = malloc(sizeof(*imf), M_IN6MFILTER, mflags);

if (newmax <= IPV6_MAX_MEMBERSHIPS) {
nmships = (struct in6_multi **)realloc(omships,
sizeof(struct in6_multi *) * newmax, M_IP6MOPTS, M_NOWAIT);
nmfilters = (struct in6_mfilter *)realloc(omfilters,
sizeof(struct in6_mfilter) * newmax, M_IN6MFILTER,
M_NOWAIT);
if (nmships != NULL && nmfilters != NULL) {
/* Initialize newly allocated source filter heads. */
for (idx = oldmax; idx < newmax; idx++) {
im6f_init(&nmfilters[idx], MCAST_UNDEFINED,
MCAST_EXCLUDE);
}
imo->im6o_max_memberships = newmax;
imo->im6o_membership = nmships;
imo->im6o_mfilters = nmfilters;
}
if (imf != NULL)
im6f_init(imf, st0, st1);

return (imf);
}

if (nmships == NULL || nmfilters == NULL) {
if (nmships != NULL)
free(nmships, M_IP6MOPTS);
if (nmfilters != NULL)
free(nmfilters, M_IN6MFILTER);
return (ETOOMANYREFS);
}
void
ip6_mfilter_free(struct in6_mfilter *imf)
{

return (0);
im6f_purge(imf);
free(imf, M_IN6MFILTER);
}

/*
@ -286,36 +256,27 @@ im6o_grow(struct ip6_moptions *imo)
* which matches the specified group, and optionally an interface.
* Return its index into the array, or -1 if not found.
*/
static size_t
static struct in6_mfilter *
im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
const struct sockaddr *group)
{
const struct sockaddr_in6 *gsin6;
struct in6_multi **pinm;
int idx;
int nmships;
struct in6_mfilter *imf;
struct in6_multi *inm;

gsin6 = (const struct sockaddr_in6 *)group;

/* The im6o_membership array may be lazy allocated. */
if (imo->im6o_membership == NULL || imo->im6o_num_memberships == 0)
return (-1);

nmships = imo->im6o_num_memberships;
pinm = &imo->im6o_membership[0];
for (idx = 0; idx < nmships; idx++, pinm++) {
if (*pinm == NULL)
IP6_MFILTER_FOREACH(imf, &imo->im6o_head) {
inm = imf->im6f_in6m;
if (inm == NULL)
continue;
if ((ifp == NULL || ((*pinm)->in6m_ifp == ifp)) &&
IN6_ARE_ADDR_EQUAL(&(*pinm)->in6m_addr,
if ((ifp == NULL || (inm->in6m_ifp == ifp)) &&
IN6_ARE_ADDR_EQUAL(&inm->in6m_addr,
&gsin6->sin6_addr)) {
break;
}
}
if (idx >= nmships)
idx = -1;

return (idx);
return (imf);
}

/*
@ -330,22 +291,13 @@ im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
* it exists, which may not be the desired behaviour.
*/
static struct in6_msource *
im6o_match_source(const struct ip6_moptions *imo, const size_t gidx,
const struct sockaddr *src)
im6o_match_source(struct in6_mfilter *imf, const struct sockaddr *src)
{
struct ip6_msource find;
struct in6_mfilter *imf;
struct ip6_msource *ims;
const sockunion_t *psa;

KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__));
KASSERT(gidx != -1 && gidx < imo->im6o_num_memberships,
("%s: invalid index %d\n", __func__, (int)gidx));

/* The im6o_mfilters array may be lazy allocated. */
if (imo->im6o_mfilters == NULL)
return (NULL);
imf = &imo->im6o_mfilters[gidx];

psa = (const sockunion_t *)src;
find.im6s_addr = psa->sin6.sin6_addr;
@ -365,14 +317,14 @@ int
im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
const struct sockaddr *group, const struct sockaddr *src)
{
size_t gidx;
struct in6_mfilter *imf;
struct in6_msource *ims;
int mode;

KASSERT(ifp != NULL, ("%s: null ifp", __func__));

gidx = im6o_match_group(imo, ifp, group);
if (gidx == -1)
imf = im6o_match_group(imo, ifp, group);
if (imf == NULL)
return (MCAST_NOTGMEMBER);

/*
@ -384,8 +336,8 @@ im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
* NOTE: We are comparing group state here at MLD t1 (now)
* with socket-layer t0 (since last downcall).
*/
mode = imo->im6o_mfilters[gidx].im6f_st[1];
ims = im6o_match_source(imo, gidx, src);
mode = imf->im6f_st[1];
ims = im6o_match_source(imf, src);

if ((ims == NULL && mode == MCAST_INCLUDE) ||
(ims != NULL && ims->im6sl_st[0] != mode))
@ -1449,7 +1401,6 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
struct ip6_moptions *imo;
struct in6_msource *ims;
struct in6_multi *inm;
size_t idx;
uint16_t fmode;
int error, doblock;
#ifdef KTR
@ -1506,16 +1457,12 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
* Check if we are actually a member of this group.
*/
imo = in6p_findmoptions(inp);
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1 || imo->im6o_mfilters == NULL) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
error = EADDRNOTAVAIL;
goto out_in6p_locked;
}

KASSERT(imo->im6o_mfilters != NULL,
("%s: im6o_mfilters not allocated", __func__));
imf = &imo->im6o_mfilters[idx];
inm = imo->im6o_membership[idx];
inm = imf->im6f_in6m;

/*
* Attempting to use the delta-based API on an
@ -1533,7 +1480,7 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
* Asked to unblock, but nothing to unblock.
* If adding a new block entry, allocate it.
*/
ims = im6o_match_source(imo, idx, &ssa->sa);
ims = im6o_match_source(imf, &ssa->sa);
if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
CTR3(KTR_MLD, "%s: source %s %spresent", __func__,
ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
@ -1603,9 +1550,6 @@ static struct ip6_moptions *
in6p_findmoptions(struct inpcb *inp)
{
struct ip6_moptions *imo;
struct in6_multi **immp;
struct in6_mfilter *imfp;
size_t idx;

INP_WLOCK(inp);
if (inp->in6p_moptions != NULL)
@ -1614,27 +1558,14 @@ in6p_findmoptions(struct inpcb *inp)
INP_WUNLOCK(inp);

imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK);
immp = malloc(sizeof(*immp) * IPV6_MIN_MEMBERSHIPS, M_IP6MOPTS,
M_WAITOK | M_ZERO);
imfp = malloc(sizeof(struct in6_mfilter) * IPV6_MIN_MEMBERSHIPS,
M_IN6MFILTER, M_WAITOK);

imo->im6o_multicast_ifp = NULL;
imo->im6o_multicast_hlim = V_ip6_defmcasthlim;
imo->im6o_multicast_loop = in6_mcast_loop;
imo->im6o_num_memberships = 0;
imo->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS;
imo->im6o_membership = immp;

/* Initialize per-group source filters. */
for (idx = 0; idx < IPV6_MIN_MEMBERSHIPS; idx++)
im6f_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE);
imo->im6o_mfilters = imfp;
STAILQ_INIT(&imo->im6o_head);

INP_WLOCK(inp);
if (inp->in6p_moptions != NULL) {
free(imfp, M_IN6MFILTER);
free(immp, M_IP6MOPTS);
free(imo, M_IP6MOPTS);
return (inp->in6p_moptions);
}
@ -1657,29 +1588,22 @@ inp_gcmoptions(struct ip6_moptions *imo)
struct in6_mfilter *imf;
struct in6_multi *inm;
struct ifnet *ifp;
size_t idx, nmships;

nmships = imo->im6o_num_memberships;
for (idx = 0; idx < nmships; ++idx) {
imf = imo->im6o_mfilters ? &imo->im6o_mfilters[idx] : NULL;
if (imf)
while ((imf = ip6_mfilter_first(&imo->im6o_head)) != NULL) {
ip6_mfilter_remove(&imo->im6o_head, imf);

im6f_leave(imf);
inm = imo->im6o_membership[idx];
ifp = inm->in6m_ifp;
if (ifp != NULL) {
if ((inm = imf->im6f_in6m) != NULL) {
if ((ifp = inm->in6m_ifp) != NULL) {
CURVNET_SET(ifp->if_vnet);
(void)in6_leavegroup(inm, imf);
CURVNET_RESTORE();
} else {
(void)in6_leavegroup(inm, imf);
}
if (imf)
im6f_purge(imf);
}

if (imo->im6o_mfilters)
free(imo->im6o_mfilters, M_IN6MFILTER);
free(imo->im6o_membership, M_IP6MOPTS);
ip6_mfilter_free(imf);
}
free(imo, M_IP6MOPTS);
}

@ -1709,7 +1633,7 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
struct sockaddr_storage *ptss;
struct sockaddr_storage *tss;
int error;
size_t idx, nsrcs, ncsrcs;
size_t nsrcs, ncsrcs;

INP_WLOCK_ASSERT(inp);

@ -1743,12 +1667,11 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
/*
* Lookup group on the socket.
*/
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1 || imo->im6o_mfilters == NULL) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
INP_WUNLOCK(inp);
return (EADDRNOTAVAIL);
}
imf = &imo->im6o_mfilters[idx];

/*
* Ignore memberships which are in limbo.
@ -1945,15 +1868,12 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
struct ip6_moptions *imo;
struct in6_multi *inm;
struct in6_msource *lims;
size_t idx;
int error, is_new;

SLIST_INIT(&inmh);
ifp = NULL;
imf = NULL;
lims = NULL;
error = 0;
is_new = 0;

memset(&gsr, 0, sizeof(struct group_source_req));
gsa = (sockunion_t *)&gsr.gsr_group;
@ -2054,13 +1974,25 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
*/
(void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);

IN6_MULTI_LOCK();

/*
* Find the membership in the membership list.
*/
imo = in6p_findmoptions(inp);
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
is_new = 1;
inm = NULL;

if (ip6_mfilter_count(&imo->im6o_head) >= IPV6_MAX_MEMBERSHIPS) {
error = ENOMEM;
goto out_in6p_locked;
}
} else {
inm = imo->im6o_membership[idx];
imf = &imo->im6o_mfilters[idx];
is_new = 0;
inm = imf->im6f_in6m;

if (ssa->ss.ss_family != AF_UNSPEC) {
/*
* MCAST_JOIN_SOURCE_GROUP on an exclusive membership
@ -2087,7 +2019,7 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
* full-state SSM API with the delta-based API,
* which is discouraged in the relevant RFCs.
*/
lims = im6o_match_source(imo, idx, &ssa->sa);
lims = im6o_match_source(imf, &ssa->sa);
if (lims != NULL /*&&
lims->im6sl_st[1] == MCAST_INCLUDE*/) {
error = EADDRNOTAVAIL;
@ -2115,27 +2047,6 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
*/
INP_WLOCK_ASSERT(inp);

if (is_new) {
if (imo->im6o_num_memberships == imo->im6o_max_memberships) {
error = im6o_grow(imo);
if (error)
goto out_in6p_locked;
}
/*
* Allocate the new slot upfront so we can deal with
* grafting the new source filter in same code path
* as for join-source on existing membership.
*/
idx = imo->im6o_num_memberships;
imo->im6o_membership[idx] = NULL;
imo->im6o_num_memberships++;
KASSERT(imo->im6o_mfilters != NULL,
("%s: im6f_mfilters vector was not allocated", __func__));
imf = &imo->im6o_mfilters[idx];
KASSERT(RB_EMPTY(&imf->im6f_sources),
("%s: im6f_sources not empty", __func__));
}

/*
* Graft new source into filter list for this inpcb's
* membership of the group. The in6_multi may not have
@ -2151,7 +2062,11 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
/* Membership starts in IN mode */
if (is_new) {
CTR1(KTR_MLD, "%s: new join w/source", __func__);
im6f_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE);
imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE);
if (imf == NULL) {
error = ENOMEM;
goto out_in6p_locked;
}
} else {
CTR2(KTR_MLD, "%s: %s source", __func__, "allow");
}
@ -2160,81 +2075,88 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: merge imf state failed",
__func__);
error = ENOMEM;
goto out_im6o_free;
goto out_in6p_locked;
}
} else {
/* No address specified; Membership starts in EX mode */
if (is_new) {
CTR1(KTR_MLD, "%s: new join w/o source", __func__);
im6f_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE);
imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE);
if (imf == NULL) {
error = ENOMEM;
goto out_in6p_locked;
}
}
}

/*
* Begin state merge transaction at MLD layer.
*/
if (is_new) {
in_pcbref(inp);
INP_WUNLOCK(inp);
IN6_MULTI_LOCK();

if (is_new) {
error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf,
&inm, 0);
&imf->im6f_in6m, 0);

INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp)) {
error = ENXIO;
goto out_in6p_unlocked;
}
if (error) {
IN6_MULTI_UNLOCK();
goto out_im6o_free;
goto out_in6p_locked;
}
/*
* NOTE: Refcount from in6_joingroup_locked()
* is protecting membership.
*/
imo->im6o_membership[idx] = inm;
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
if (error) {
CTR1(KTR_MLD, "%s: failed to merge inm state",
__func__);
else {
IN6_MULTI_LIST_UNLOCK();
im6f_rollback(imf);
im6f_reap(imf);
goto out_in6p_locked;
}
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
error = mld_change_state(inm, 0);
if (error)
IN6_MULTI_LIST_UNLOCK();

if (error) {
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
IN6_MULTI_LIST_UNLOCK();
}

IN6_MULTI_UNLOCK();
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp))
return (ENXIO);
if (error) {
im6f_rollback(imf);
if (is_new)
im6f_purge(imf);
else
im6f_reap(imf);
} else {
im6f_commit(imf);
goto out_in6p_locked;
}
}

out_im6o_free:
if (error && is_new) {
inm = imo->im6o_membership[idx];
if (inm != NULL) {
IN6_MULTI_LIST_LOCK();
in6m_rele_locked(&inmh, inm);
IN6_MULTI_LIST_UNLOCK();
}
imo->im6o_membership[idx] = NULL;
--imo->im6o_num_memberships;
}
if (is_new)
ip6_mfilter_insert(&imo->im6o_head, imf);

im6f_commit(imf);
imf = NULL;

out_in6p_locked:
INP_WUNLOCK(inp);
out_in6p_unlocked:
IN6_MULTI_UNLOCK();

if (is_new && imf) {
if (imf->im6f_in6m != NULL) {
struct in6_multi_head inmh;

SLIST_INIT(&inmh);
SLIST_INSERT_HEAD(&inmh, imf->im6f_in6m, in6m_defer);
in6m_release_list_deferred(&inmh);
}
ip6_mfilter_free(imf);
}
return (error);
}

@ -2253,8 +2175,8 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
struct in6_msource *ims;
struct in6_multi *inm;
uint32_t ifindex;
size_t idx;
int error, is_final;
int error;
bool is_final;
#ifdef KTR
char ip6tbuf[INET6_ADDRSTRLEN];
#endif
@ -2262,7 +2184,7 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
ifp = NULL;
ifindex = 0;
error = 0;
is_final = 1;
is_final = true;

memset(&gsr, 0, sizeof(struct group_source_req));
gsa = (sockunion_t *)&gsr.gsr_group;
@ -2380,20 +2302,21 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp);
KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__));

IN6_MULTI_LOCK();

/*
* Find the membership in the membership array.
* Find the membership in the membership list.
*/
imo = in6p_findmoptions(inp);
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
error = EADDRNOTAVAIL;
goto out_in6p_locked;
}
inm = imo->im6o_membership[idx];
imf = &imo->im6o_mfilters[idx];
inm = imf->im6f_in6m;

if (ssa->ss.ss_family != AF_UNSPEC)
is_final = 0;
is_final = false;

/*
* Begin state merge transaction at socket layer.
@ -2405,13 +2328,14 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
* MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
*/
if (is_final) {
ip6_mfilter_remove(&imo->im6o_head, imf);
im6f_leave(imf);
} else {
if (imf->im6f_st[0] == MCAST_EXCLUDE) {
error = EADDRNOTAVAIL;
goto out_in6p_locked;
}
ims = im6o_match_source(imo, idx, &ssa->sa);
ims = im6o_match_source(imf, &ssa->sa);
if (ims == NULL) {
CTR3(KTR_MLD, "%s: source %p %spresent", __func__,
ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
@ -2431,60 +2355,47 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
/*
* Begin state merge transaction at MLD layer.
*/
in_pcbref(inp);
INP_WUNLOCK(inp);
IN6_MULTI_LOCK();
if (!is_final) {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error) {
CTR1(KTR_MLD, "%s: failed to merge inm state",
__func__);
IN6_MULTI_LIST_UNLOCK();
im6f_rollback(imf);
im6f_reap(imf);
goto out_in6p_locked;
}

if (is_final) {
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
error = mld_change_state(inm, 0);
IN6_MULTI_LIST_UNLOCK();
if (error) {
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
im6f_rollback(imf);
im6f_reap(imf);
goto out_in6p_locked;
}
}

im6f_commit(imf);
im6f_reap(imf);

out_in6p_locked:
INP_WUNLOCK(inp);

if (is_final && imf) {
/*
* Give up the multicast address record to which
* the membership points.
*/
(void)in6_leavegroup_locked(inm, imf);
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
CTR1(KTR_MLD, "%s: failed to merge inm state",
__func__);
else {
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
error = mld_change_state(inm, 0);
if (error)
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
IN6_MULTI_LIST_UNLOCK();
ip6_mfilter_free(imf);
}

IN6_MULTI_UNLOCK();
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp))
return (ENXIO);

if (error)
im6f_rollback(imf);
else
im6f_commit(imf);

im6f_reap(imf);

if (is_final) {
/* Remove the gap in the membership array. */
KASSERT(RB_EMPTY(&imf->im6f_sources),
("%s: im6f_sources not empty", __func__));
for (++idx; idx < imo->im6o_num_memberships; ++idx) {
imo->im6o_membership[idx - 1] = imo->im6o_membership[idx];
imo->im6o_mfilters[idx - 1] = imo->im6o_mfilters[idx];
}
im6f_init(&imo->im6o_mfilters[idx - 1], MCAST_UNDEFINED,
MCAST_EXCLUDE);
imo->im6o_num_memberships--;
}

out_in6p_locked:
INP_WUNLOCK(inp);
return (error);
}

@ -2542,7 +2453,6 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
struct in6_mfilter *imf;
struct ip6_moptions *imo;
struct in6_multi *inm;
size_t idx;
int error;

error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
@ -2579,13 +2489,12 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
* Check if this socket is a member of this group.
*/
imo = in6p_findmoptions(inp);
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1 || imo->im6o_mfilters == NULL) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
error = EADDRNOTAVAIL;
goto out_in6p_locked;
}
inm = imo->im6o_membership[idx];
imf = &imo->im6o_mfilters[idx];
inm = imf->im6f_in6m;

/*
* Begin state merge transaction at socket layer.

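For illustration, a minimal userspace sketch of the lookup shape introduced above, where im6o_match_group() now returns a pointer into an STAILQ (NULL on miss) rather than an array index (-1 on miss). This is not kernel code: it assumes a BSD-style <sys/queue.h> that provides the STAILQ macros, and the toy struct filt stands in for in6_mfilter and its in6_multi back-pointer.

#include <stddef.h>
#include <stdio.h>
#include <sys/queue.h>

struct filt {
	int group;			/* stand-in for the group behind im6f_in6m */
	STAILQ_ENTRY(filt) entry;	/* stand-in for im6f_entry */
};
STAILQ_HEAD(filt_head, filt);

/* Same shape as the new im6o_match_group(): a pointer or NULL, no index. */
static struct filt *
match_group(struct filt_head *head, int group)
{
	struct filt *f;

	STAILQ_FOREACH(f, head, entry) {
		if (f->group == group)
			return (f);
	}
	return (NULL);
}

int
main(void)
{
	struct filt_head head = STAILQ_HEAD_INITIALIZER(head);
	struct filt a = { .group = 1 }, b = { .group = 2 };

	STAILQ_INSERT_TAIL(&head, &a, entry);
	STAILQ_INSERT_TAIL(&head, &b, entry);
	printf("group 2 %sfound\n", match_group(&head, 2) != NULL ? "" : "not ");
	printf("group 9 %sfound\n", match_group(&head, 9) != NULL ? "" : "not ");
	return (0);
}
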
@ -817,8 +817,9 @@ void
in6_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
{
struct inpcb *in6p;
struct in6_multi *inm;
struct in6_mfilter *imf;
struct ip6_moptions *im6o;
int i, gap;

INP_INFO_WLOCK(pcbinfo);
CK_LIST_FOREACH(in6p, pcbinfo->ipi_listhead, inp_list) {
@ -839,19 +840,19 @@ in6_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
* Drop multicast group membership if we joined
* through the interface being detached.
*/
gap = 0;
for (i = 0; i < im6o->im6o_num_memberships; i++) {
if (im6o->im6o_membership[i]->in6m_ifp ==
ifp) {
in6_leavegroup(im6o->im6o_membership[i], NULL);
gap++;
} else if (gap != 0) {
im6o->im6o_membership[i - gap] =
im6o->im6o_membership[i];
restart:
IP6_MFILTER_FOREACH(imf, &im6o->im6o_head) {
if ((inm = imf->im6f_in6m) == NULL)
continue;
if (inm->in6m_ifp != ifp)
continue;
ip6_mfilter_remove(&im6o->im6o_head, imf);
IN6_MULTI_LOCK_ASSERT();
in6_leavegroup_locked(inm, NULL);
ip6_mfilter_free(imf);
goto restart;
}
}
im6o->im6o_num_memberships -= gap;
}
INP_WUNLOCK(in6p);
}
INP_INFO_WUNLOCK(pcbinfo);

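The reworked in6_pcbpurgeif0() above drops the gap-compaction loop over the membership array and instead unlinks matching in6_mfilter entries from the list, restarting the walk after each removal because the iterator's next pointer dies with the freed entry. A userspace sketch of that removal idiom (not kernel code; assumes a BSD-style <sys/queue.h> with STAILQ):

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct memb {
	int ifindex;			/* stand-in for in6m_ifp */
	STAILQ_ENTRY(memb) entry;
};
STAILQ_HEAD(memb_head, memb);

static void
purge_ifindex(struct memb_head *head, int ifindex)
{
	struct memb *m;

restart:
	STAILQ_FOREACH(m, head, entry) {
		if (m->ifindex != ifindex)
			continue;
		STAILQ_REMOVE(head, m, memb, entry);
		free(m);		/* the kernel leaves the group and frees the filter here */
		goto restart;		/* the iterator is stale once m is gone */
	}
}

int
main(void)
{
	struct memb_head head = STAILQ_HEAD_INITIALIZER(head);
	struct memb *m;
	int i;

	for (i = 0; i < 4; i++) {
		if ((m = calloc(1, sizeof(*m))) == NULL)
			return (1);
		m->ifindex = i % 2;	/* two members on if 0, two on if 1 */
		STAILQ_INSERT_TAIL(&head, m, entry);
	}
	purge_ifindex(&head, 1);
	while ((m = STAILQ_FIRST(&head)) != NULL) {
		STAILQ_REMOVE_HEAD(&head, entry);
		printf("kept member on ifindex %d\n", m->ifindex);
		free(m);
	}
	return (0);
}
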
@ -602,8 +602,60 @@ struct in6_mfilter {
struct ip6_msource_tree im6f_sources; /* source list for (S,G) */
u_long im6f_nsrc; /* # of source entries */
uint8_t im6f_st[2]; /* state before/at commit */
struct in6_multi *im6f_in6m; /* associated multicast address */
STAILQ_ENTRY(in6_mfilter) im6f_entry; /* list entry */
};

/*
* Helper types and functions for IPv6 multicast filters.
*/
STAILQ_HEAD(ip6_mfilter_head, in6_mfilter);

struct in6_mfilter *ip6_mfilter_alloc(int mflags, int st0, int st1);
void ip6_mfilter_free(struct in6_mfilter *);

static inline void
ip6_mfilter_init(struct ip6_mfilter_head *head)
{

STAILQ_INIT(head);
}

static inline struct in6_mfilter *
ip6_mfilter_first(const struct ip6_mfilter_head *head)
{

return (STAILQ_FIRST(head));
}

static inline void
ip6_mfilter_insert(struct ip6_mfilter_head *head, struct in6_mfilter *imf)
{

STAILQ_INSERT_TAIL(head, imf, im6f_entry);
}

static inline void
ip6_mfilter_remove(struct ip6_mfilter_head *head, struct in6_mfilter *imf)
{

STAILQ_REMOVE(head, imf, in6_mfilter, im6f_entry);
}

#define IP6_MFILTER_FOREACH(imf, head) \
STAILQ_FOREACH(imf, head, im6f_entry)

static inline size_t
ip6_mfilter_count(struct ip6_mfilter_head *head)
{
struct in6_mfilter *imf;
size_t num = 0;

STAILQ_FOREACH(imf, head, im6f_entry)
num++;
return (num);
}

/*
* Legacy KAME IPv6 multicast membership descriptor.
*/

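The new inline helpers above are thin wrappers around the STAILQ macros; because the membership list is unbounded, the old im6o_grow()/im6o_max_memberships bookkeeping reduces to a linear count checked against IPV6_MAX_MEMBERSHIPS at join time (see the ip6_mfilter_count() call in in6p_join_group() earlier). A compact userspace sketch of that shape, with hypothetical names rather than the kernel API itself, assuming a BSD-style <sys/queue.h>:

#include <stdbool.h>
#include <stddef.h>
#include <sys/queue.h>

#define MAX_MEMBERSHIPS	4095		/* stands in for IPV6_MAX_MEMBERSHIPS */

struct mfilt {
	STAILQ_ENTRY(mfilt) entry;
};
STAILQ_HEAD(mfilt_head, mfilt);

static inline size_t
mfilt_count(const struct mfilt_head *head)
{
	const struct mfilt *f;
	size_t n = 0;

	STAILQ_FOREACH(f, head, entry)
		n++;
	return (n);
}

/* Join-time check replacing the "grow the array when num == max" dance. */
static inline bool
mfilt_can_join(const struct mfilt_head *head)
{
	return (mfilt_count(head) < MAX_MEMBERSHIPS);
}

int
main(void)
{
	struct mfilt_head head = STAILQ_HEAD_INITIALIZER(head);
	struct mfilt a, b;

	STAILQ_INSERT_TAIL(&head, &a, entry);
	STAILQ_INSERT_TAIL(&head, &b, entry);
	return (mfilt_can_join(&head) ? 0 : 1);
}
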
@ -110,6 +110,7 @@ struct ip6_direct_ctx {
uint32_t ip6dc_off; /* offset to next header */
};

#if defined(_NETINET6_IN6_VAR_H_) && defined(_KERNEL)
/*
* Structure attached to inpcb.in6p_moptions and
* passed to ip6_output when IPv6 multicast options are in use.
@ -119,13 +120,11 @@ struct ip6_moptions {
struct ifnet *im6o_multicast_ifp; /* ifp for outgoing multicasts */
u_char im6o_multicast_hlim; /* hoplimit for outgoing multicasts */
u_char im6o_multicast_loop; /* 1 >= hear sends if a member */
u_short im6o_num_memberships; /* no. memberships this socket */
u_short im6o_max_memberships; /* max memberships this socket */
struct in6_multi **im6o_membership; /* group memberships */
struct in6_mfilter *im6o_mfilters; /* source filters */
struct epoch_context imo6_epoch_ctx;
struct ip6_mfilter_head im6o_head; /* group membership list */
};

#else
struct ip6_moptions;
#endif
/*
* Control options for outgoing packets
*/

@ -266,7 +266,7 @@ static void pfsync_push(struct pfsync_bucket *);
static void pfsync_push_all(struct pfsync_softc *);
static void pfsyncintr(void *);
static int pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
void *);
struct in_mfilter *imf);
static void pfsync_multicast_cleanup(struct pfsync_softc *);
static void pfsync_pointers_init(void);
static void pfsync_pointers_uninit(void);
@ -432,7 +432,6 @@ pfsync_clone_destroy(struct ifnet *ifp)
pfsync_drop(sc);

if_free(ifp);
if (sc->sc_imo.imo_membership)
pfsync_multicast_cleanup(sc);
mtx_destroy(&sc->sc_mtx);
mtx_destroy(&sc->sc_bulk_mtx);
@ -1375,10 +1374,9 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)

case SIOCSETPFSYNC:
{
struct ip_moptions *imo = &sc->sc_imo;
struct in_mfilter *imf = NULL;
struct ifnet *sifp;
struct ip *ip;
void *mship = NULL;

if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
return (error);
@ -1398,8 +1396,7 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
pfsyncr.pfsyncr_syncpeer.s_addr == 0 ||
pfsyncr.pfsyncr_syncpeer.s_addr ==
htonl(INADDR_PFSYNC_GROUP)))
mship = malloc((sizeof(struct in_multi *) *
IP_MIN_MEMBERSHIPS), M_PFSYNC, M_WAITOK | M_ZERO);
imf = ip_mfilter_alloc(M_WAITOK, 0, 0);

PFSYNC_LOCK(sc);
if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
@ -1421,7 +1418,6 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
if (sc->sc_sync_if)
if_rele(sc->sc_sync_if);
sc->sc_sync_if = NULL;
if (imo->imo_membership)
pfsync_multicast_cleanup(sc);
PFSYNC_UNLOCK(sc);
break;
@ -1438,14 +1434,13 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
}

if (imo->imo_membership)
pfsync_multicast_cleanup(sc);

if (sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
error = pfsync_multicast_setup(sc, sifp, mship);
error = pfsync_multicast_setup(sc, sifp, imf);
if (error) {
if_rele(sifp);
free(mship, M_PFSYNC);
ip_mfilter_free(imf);
PFSYNC_UNLOCK(sc);
return (error);
}
@ -2355,7 +2350,8 @@ pfsyncintr(void *arg)
}

static int
pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, void *mship)
pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp,
struct in_mfilter *imf)
{
struct ip_moptions *imo = &sc->sc_imo;
int error;
@ -2363,16 +2359,14 @@ pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, void *mship)
if (!(ifp->if_flags & IFF_MULTICAST))
return (EADDRNOTAVAIL);

imo->imo_membership = (struct in_multi **)mship;
imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
imo->imo_multicast_vif = -1;

if ((error = in_joingroup(ifp, &sc->sc_sync_peer, NULL,
&imo->imo_membership[0])) != 0) {
imo->imo_membership = NULL;
&imf->imf_inm)) != 0)
return (error);
}
imo->imo_num_memberships++;

ip_mfilter_init(&imo->imo_head);
ip_mfilter_insert(&imo->imo_head, imf);
imo->imo_multicast_ifp = ifp;
imo->imo_multicast_ttl = PFSYNC_DFLTTL;
imo->imo_multicast_loop = 0;
@ -2384,10 +2378,13 @@ static void
pfsync_multicast_cleanup(struct pfsync_softc *sc)
{
struct ip_moptions *imo = &sc->sc_imo;
struct in_mfilter *imf;

in_leavegroup(imo->imo_membership[0], NULL);
free(imo->imo_membership, M_PFSYNC);
imo->imo_membership = NULL;
while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
ip_mfilter_remove(&imo->imo_head, imf);
in_leavegroup(imf->imf_inm, NULL);
ip_mfilter_free(imf);
}
imo->imo_multicast_ifp = NULL;
}

@ -2406,7 +2403,7 @@ pfsync_detach_ifnet(struct ifnet *ifp)
* is going away. We do need to ensure we don't try to do
* cleanup later.
*/
sc->sc_imo.imo_membership = NULL;
ip_mfilter_init(&sc->sc_imo.imo_head);
sc->sc_imo.imo_multicast_ifp = NULL;
sc->sc_sync_if = NULL;
}

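In the pfsync changes above, the per-softc membership array and its imo_num_memberships counter give way to a single in_mfilter that is allocated with M_WAITOK before PFSYNC_LOCK() is taken, handed to pfsync_multicast_setup() under the lock, and released with ip_mfilter_free() if the setup fails. A userspace sketch of that allocate-outside-the-lock, consume-or-free pattern, using hypothetical names and a pthread mutex in place of the softc lock (not the driver's actual code):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct filter { int group; };

static pthread_mutex_t softc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct filter *active_filter;	/* stands in for the joined membership */
static int fail_join;			/* toggle to exercise the error path */

static int
join_sync_group(struct filter *f, int group)
{
	if (fail_join)
		return (EADDRNOTAVAIL);	/* cf. in_joingroup() failing */
	f->group = group;
	return (0);
}

static int
configure_peer(int group)
{
	struct filter *f;
	int error;

	/* May sleep, so allocate before taking the lock (cf. the M_WAITOK alloc). */
	if ((f = malloc(sizeof(*f))) == NULL)
		return (ENOMEM);

	pthread_mutex_lock(&softc_lock);
	error = join_sync_group(f, group);
	if (error != 0) {
		pthread_mutex_unlock(&softc_lock);
		free(f);			/* cf. ip_mfilter_free() on error */
		return (error);
	}
	free(active_filter);			/* cf. pfsync_multicast_cleanup() */
	active_filter = f;			/* cf. ip_mfilter_insert() */
	pthread_mutex_unlock(&softc_lock);
	return (0);
}

int
main(void)
{
	int error;

	error = configure_peer(224);
	fail_join = 1;
	if (configure_peer(225) == 0)	/* expected to fail and free its filter */
		error = 1;
	return (error);
}
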
@ -188,6 +188,7 @@
#define IBMPOWERPCA2 0x0049
#define IBMPOWER7PLUS 0x004a
#define IBMPOWER8E 0x004b
#define IBMPOWER8NVL 0x004c
#define IBMPOWER8 0x004d
#define IBMPOWER9 0x004e
#define MPC860 0x0050

@ -103,6 +103,7 @@ void sbuf_start_section(struct sbuf *, ssize_t *);
ssize_t sbuf_end_section(struct sbuf *, ssize_t, size_t, int);
void sbuf_hexdump(struct sbuf *, const void *, int, const char *,
int);
int sbuf_count_drain(void *arg, const char *data, int len);
void sbuf_putbuf(struct sbuf *);

#ifdef _KERNEL

@ -85,6 +85,7 @@ struct thread;
#define SLEEPQ_SX 0x03 /* Used by an sx lock. */
#define SLEEPQ_LK 0x04 /* Used by a lockmgr. */
#define SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */
#define SLEEPQ_UNFAIR 0x200 /* Unfair wakeup order. */

void init_sleepqueues(void);
int sleepq_abort(struct thread *td, int intrval);

@ -613,6 +613,7 @@ int pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr,
_sleep((chan), NULL, (pri), (wmesg), (bt), (pr), (flags))
void wakeup(void * chan);
void wakeup_one(void * chan);
void wakeup_any(void * chan);

/*
* Common `struct cdev *' stuff are declared here to avoid #include poisoning

@ -1128,7 +1128,10 @@
#define enc_xform_skipjack _bsd_enc_xform_skipjack
#define eopnotsupp _bsd_eopnotsupp
#define epoch_call _bsd_epoch_call
#define epoch_drain_callbacks _bsd_epoch_drain_callbacks
#define epoch_enter _bsd_epoch_enter
#define epoch_enter_preempt _bsd_epoch_enter_preempt
#define epoch_exit _bsd_epoch_exit
#define epoch_exit_preempt _bsd_epoch_exit_preempt
#define epoch_init _bsd_epoch_init
#define epoch_wait _bsd_epoch_wait
@ -2278,6 +2281,8 @@
#define ip6_maxfrags _bsd_ip6_maxfrags
#define ip6_maxfragsperpacket _bsd_ip6_maxfragsperpacket
#define ip6_mcast_pmtu _bsd_ip6_mcast_pmtu
#define ip6_mfilter_alloc _bsd_ip6_mfilter_alloc
#define ip6_mfilter_free _bsd_ip6_mfilter_free
#define ip6_mforward _bsd_ip6_mforward
#define ip6_mloopback _bsd_ip6_mloopback
#define ip6_mrouter _bsd_ip6_mrouter
@ -2353,6 +2358,8 @@
#define ip_input _bsd_ip_input
#define ip_insertoptions _bsd_ip_insertoptions
#define ip_mcast_src _bsd_ip_mcast_src
#define ip_mfilter_alloc _bsd_ip_mfilter_alloc
#define ip_mfilter_free _bsd_ip_mfilter_free
#define ip_mforward _bsd_ip_mforward
#define ip_mrouter _bsd_ip_mrouter
#define ip_mrouter_done _bsd_ip_mrouter_done
@ -3986,6 +3993,7 @@
#define sbuf_clear _bsd_sbuf_clear
#define sbuf_clear_flags _bsd_sbuf_clear_flags
#define sbuf_copyin _bsd_sbuf_copyin
#define sbuf_count_drain _bsd_sbuf_count_drain
#define sbuf_cpy _bsd_sbuf_cpy
#define sbuf_data _bsd_sbuf_data
#define sbuf_delete _bsd_sbuf_delete
@ -4498,6 +4506,8 @@
#define Skein_512_Output _bsd_Skein_512_Output
#define Skein_512_Process_Block _bsd_Skein_512_Process_Block
#define Skein_512_Update _bsd_Skein_512_Update
#define Skein_Get64_LSB_First _bsd_Skein_Get64_LSB_First
#define Skein_Put64_LSB_First _bsd_Skein_Put64_LSB_First
#define skipjack_backwards _bsd_skipjack_backwards
#define skipjack_forwards _bsd_skipjack_forwards
#define sl_compress_init _bsd_sl_compress_init
@ -5440,6 +5450,7 @@
#define vsnrprintf _bsd_vsnrprintf
#define vsprintf _bsd_vsprintf
#define wakeup _bsd_wakeup
#define wakeup_any _bsd_wakeup_any
#define wakeup_one _bsd_wakeup_one
#define window_deflate _bsd_window_deflate
#define window_inflate _bsd_window_inflate