Update to FreeBSD stable/12 2019-06-26

Git mirror commit aa83598855d14cdbf7aef6b05d0617e90f87ca2f.
Sebastian Huber 2019-06-26 09:03:26 +02:00
parent 0659f7b126
commit 7ece7548c1
20 changed files with 294 additions and 135 deletions

@@ -1 +1 @@
-Subproject commit 78576620f2689e23144a1cf1bf55106cc6abe2b7
+Subproject commit aa83598855d14cdbf7aef6b05d0617e90f87ca2f

View File

@@ -149,6 +149,7 @@ struct ifa_order_elt {
 TAILQ_HEAD(ifa_queue, ifa_order_elt);
 
+#ifndef __rtems__
 static struct module_map_entry {
     const char *ifname;
     const char *kldname;
@@ -172,6 +173,7 @@ static struct module_map_entry {
         .kldname = "if_enc",
     },
 };
+#endif /* __rtems__ */
 
 void
@@ -436,7 +438,7 @@ int
 rtems_bsd_command_ifconfig(int argc, char *argv[])
 {
     int exit_code;
-    const void *data_begin;
+    void *data_begin;
     size_t data_size;
 
     data_begin = RTEMS_LINKER_SET_BEGIN(bsd_prog_ifconfig);

View File

@@ -263,10 +263,14 @@ static int em_setup_msix(if_ctx_t ctx);
 static void em_initialize_transmit_unit(if_ctx_t ctx);
 static void em_initialize_receive_unit(if_ctx_t ctx);
-static void em_if_enable_intr(if_ctx_t ctx);
-static void em_if_disable_intr(if_ctx_t ctx);
+static void em_if_intr_enable(if_ctx_t ctx);
+static void em_if_intr_disable(if_ctx_t ctx);
+static void igb_if_intr_enable(if_ctx_t ctx);
+static void igb_if_intr_disable(if_ctx_t ctx);
 static int em_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
 static int em_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
+static int igb_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
+static int igb_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
 static void em_if_multi_set(if_ctx_t ctx);
 static void em_if_update_admin_status(if_ctx_t ctx);
 static void em_if_debug(if_ctx_t ctx);
@@ -377,8 +381,8 @@ static device_method_t em_if_methods[] = {
     DEVMETHOD(ifdi_init, em_if_init),
     DEVMETHOD(ifdi_stop, em_if_stop),
     DEVMETHOD(ifdi_msix_intr_assign, em_if_msix_intr_assign),
-    DEVMETHOD(ifdi_intr_enable, em_if_enable_intr),
-    DEVMETHOD(ifdi_intr_disable, em_if_disable_intr),
+    DEVMETHOD(ifdi_intr_enable, em_if_intr_enable),
+    DEVMETHOD(ifdi_intr_disable, em_if_intr_disable),
     DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc),
     DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc),
     DEVMETHOD(ifdi_queues_free, em_if_queues_free),
@@ -400,14 +404,47 @@ static device_method_t em_if_methods[] = {
     DEVMETHOD_END
 };
 
-/*
- * note that if (adapter->msix_mem) is replaced by:
- * if (adapter->intr_type == IFLIB_INTR_MSIX)
- */
 static driver_t em_if_driver = {
     "em_if", em_if_methods, sizeof(struct adapter)
 };
 
+static device_method_t igb_if_methods[] = {
+    DEVMETHOD(ifdi_attach_pre, em_if_attach_pre),
+    DEVMETHOD(ifdi_attach_post, em_if_attach_post),
+    DEVMETHOD(ifdi_detach, em_if_detach),
+    DEVMETHOD(ifdi_shutdown, em_if_shutdown),
+    DEVMETHOD(ifdi_suspend, em_if_suspend),
+    DEVMETHOD(ifdi_resume, em_if_resume),
+    DEVMETHOD(ifdi_init, em_if_init),
+    DEVMETHOD(ifdi_stop, em_if_stop),
+    DEVMETHOD(ifdi_msix_intr_assign, em_if_msix_intr_assign),
+    DEVMETHOD(ifdi_intr_enable, igb_if_intr_enable),
+    DEVMETHOD(ifdi_intr_disable, igb_if_intr_disable),
+    DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc),
+    DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc),
+    DEVMETHOD(ifdi_queues_free, em_if_queues_free),
+    DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status),
+    DEVMETHOD(ifdi_multi_set, em_if_multi_set),
+    DEVMETHOD(ifdi_media_status, em_if_media_status),
+    DEVMETHOD(ifdi_media_change, em_if_media_change),
+    DEVMETHOD(ifdi_mtu_set, em_if_mtu_set),
+    DEVMETHOD(ifdi_promisc_set, em_if_set_promisc),
+    DEVMETHOD(ifdi_timer, em_if_timer),
+    DEVMETHOD(ifdi_watchdog_reset, em_if_watchdog_reset),
+    DEVMETHOD(ifdi_vlan_register, em_if_vlan_register),
+    DEVMETHOD(ifdi_vlan_unregister, em_if_vlan_unregister),
+    DEVMETHOD(ifdi_get_counter, em_if_get_counter),
+    DEVMETHOD(ifdi_led_func, em_if_led_func),
+    DEVMETHOD(ifdi_rx_queue_intr_enable, igb_if_rx_queue_intr_enable),
+    DEVMETHOD(ifdi_tx_queue_intr_enable, igb_if_tx_queue_intr_enable),
+    DEVMETHOD(ifdi_debug, em_if_debug),
+    DEVMETHOD_END
+};
+
+static driver_t igb_if_driver = {
+    "igb_if", igb_if_methods, sizeof(struct adapter)
+};
+
 /*********************************************************************
  * Tunable default values.
  *********************************************************************/
@@ -527,7 +564,7 @@ static struct if_shared_ctx igb_sctx_init = {
     .isc_admin_intrcnt = 1,
     .isc_vendor_info = igb_vendor_info_array,
     .isc_driver_version = em_driver_version,
-    .isc_driver = &em_if_driver,
+    .isc_driver = &igb_if_driver,
     .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
 
     .isc_nrxd_min = {EM_MIN_RXD},
@@ -1335,8 +1372,6 @@ em_intr(void *arg)
     reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
 
-    if (adapter->intr_type != IFLIB_INTR_LEGACY)
-        goto skip_stray;
     /* Hot eject? */
     if (reg_icr == 0xffffffff)
         return FILTER_STRAY;
@@ -1353,7 +1388,14 @@ em_intr(void *arg)
         (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
         return FILTER_STRAY;
 
-skip_stray:
+    /*
+     * Only MSI-X interrupts have one-shot behavior by taking advantage
+     * of the EIAC register. Thus, explicitly disable interrupts. This
+     * also works around the MSI message reordering errata on certain
+     * systems.
+     */
+    IFDI_INTR_DISABLE(ctx);
+
     /* Link status change */
     if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
         adapter->hw.mac.get_link_status = 1;
@@ -1366,40 +1408,13 @@ skip_stray:
     return (FILTER_SCHEDULE_THREAD);
 }
 
-static void
-igb_rx_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
-{
-    E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxq->eims);
-}
-
-static void
-em_rx_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
-{
-    E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxq->eims);
-}
-
-static void
-igb_tx_enable_queue(struct adapter *adapter, struct em_tx_queue *txq)
-{
-    E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txq->eims);
-}
-
-static void
-em_tx_enable_queue(struct adapter *adapter, struct em_tx_queue *txq)
-{
-    E1000_WRITE_REG(&adapter->hw, E1000_IMS, txq->eims);
-}
-
 static int
 em_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
 {
     struct adapter *adapter = iflib_get_softc(ctx);
     struct em_rx_queue *rxq = &adapter->rx_queues[rxqid];
 
-    if (adapter->hw.mac.type >= igb_mac_min)
-        igb_rx_enable_queue(adapter, rxq);
-    else
-        em_rx_enable_queue(adapter, rxq);
+    E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxq->eims);
     return (0);
 }
@@ -1409,10 +1424,27 @@ em_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
     struct adapter *adapter = iflib_get_softc(ctx);
     struct em_tx_queue *txq = &adapter->tx_queues[txqid];
 
-    if (adapter->hw.mac.type >= igb_mac_min)
-        igb_tx_enable_queue(adapter, txq);
-    else
-        em_tx_enable_queue(adapter, txq);
+    E1000_WRITE_REG(&adapter->hw, E1000_IMS, txq->eims);
+    return (0);
+}
+
+static int
+igb_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+{
+    struct adapter *adapter = iflib_get_softc(ctx);
+    struct em_rx_queue *rxq = &adapter->rx_queues[rxqid];
+
+    E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxq->eims);
+    return (0);
+}
+
+static int
+igb_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
+{
+    struct adapter *adapter = iflib_get_softc(ctx);
+    struct em_tx_queue *txq = &adapter->tx_queues[txqid];
+
+    E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txq->eims);
     return (0);
 }
@@ -3376,7 +3408,7 @@ em_setup_vlan_hw_support(struct adapter *adapter)
 }
 
 static void
-em_if_enable_intr(if_ctx_t ctx)
+em_if_intr_enable(if_ctx_t ctx)
 {
     struct adapter *adapter = iflib_get_softc(ctx);
     struct e1000_hw *hw = &adapter->hw;
@@ -3385,30 +3417,51 @@ em_if_enable_intr(if_ctx_t ctx)
     if (hw->mac.type == e1000_82574) {
         E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
         ims_mask |= adapter->ims;
-    } else if (adapter->intr_type == IFLIB_INTR_MSIX && hw->mac.type >= igb_mac_min) {
-        u32 mask = (adapter->que_mask | adapter->link_mask);
-        E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask);
-        E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask);
-        E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask);
-        ims_mask = E1000_IMS_LSC;
     }
     E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
 }
 
 static void
-em_if_disable_intr(if_ctx_t ctx)
+em_if_intr_disable(if_ctx_t ctx)
 {
     struct adapter *adapter = iflib_get_softc(ctx);
     struct e1000_hw *hw = &adapter->hw;
 
-    if (adapter->intr_type == IFLIB_INTR_MSIX) {
-        if (hw->mac.type >= igb_mac_min)
-            E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
-        E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
-    }
-    E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
+    if (hw->mac.type == e1000_82574)
+        E1000_WRITE_REG(hw, EM_EIAC, 0);
+    E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+}
+
+static void
+igb_if_intr_enable(if_ctx_t ctx)
+{
+    struct adapter *adapter = iflib_get_softc(ctx);
+    struct e1000_hw *hw = &adapter->hw;
+    u32 mask;
+
+    if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) {
+        mask = (adapter->que_mask | adapter->link_mask);
+        E1000_WRITE_REG(hw, E1000_EIAC, mask);
+        E1000_WRITE_REG(hw, E1000_EIAM, mask);
+        E1000_WRITE_REG(hw, E1000_EIMS, mask);
+        E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC);
+    } else
+        E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+    E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igb_if_intr_disable(if_ctx_t ctx)
+{
+    struct adapter *adapter = iflib_get_softc(ctx);
+    struct e1000_hw *hw = &adapter->hw;
+
+    if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) {
+        E1000_WRITE_REG(hw, E1000_EIMC, 0xffffffff);
+        E1000_WRITE_REG(hw, E1000_EIAC, 0);
+    }
+    E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+    E1000_WRITE_FLUSH(hw);
 }
 
 /*

View File

@@ -65,9 +65,11 @@ struct intr_map_data_fdt {
 };
 #endif
 
-#define SIMPLEBUS_PNP_DESCR "Z:compat;P:#;"
-#define SIMPLEBUS_PNP_INFO(t) \
-    MODULE_PNP_INFO(SIMPLEBUS_PNP_DESCR, simplebus, t, t, sizeof(t) / sizeof(t[0]));
+#define FDTCOMPAT_PNP_DESCR "Z:compat;P:#;"
+#define FDTCOMPAT_PNP_INFO(t, busname) \
+    MODULE_PNP_INFO(FDTCOMPAT_PNP_DESCR, busname, t, t, sizeof(t) / sizeof(t[0]));
+
+#define SIMPLEBUS_PNP_INFO(t) FDTCOMPAT_PNP_INFO(t, simplebus)
 
 /* Generic implementation of ofw_bus_if.m methods and helper routines */
 int ofw_bus_gen_setup_devinfo(struct ofw_bus_devinfo *, phandle_t);

View File

@@ -1169,9 +1169,11 @@ pcib_pcie_intr_hotplug(void *arg)
 {
     struct pcib_softc *sc;
     device_t dev;
+    uint16_t old_slot_sta;
 
     sc = arg;
     dev = sc->dev;
+    old_slot_sta = sc->pcie_slot_sta;
     sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
 
     /* Clear the events just reported. */
@@ -1187,7 +1189,8 @@ pcib_pcie_intr_hotplug(void *arg)
                 "Attention Button Pressed: Detach Cancelled\n");
             sc->flags &= ~PCIB_DETACH_PENDING;
             callout_stop(&sc->pcie_ab_timer);
-        } else {
+        } else if (old_slot_sta & PCIEM_SLOT_STA_PDS) {
+            /* Only initiate detach sequence if device present. */
             device_printf(dev,
                 "Attention Button Pressed: Detaching in 5 seconds\n");
             sc->flags |= PCIB_DETACH_PENDING;

View File

@@ -1224,6 +1224,40 @@ complete:
     return (0);
 }
 
+static int
+ugen_fs_copy_out_cancelled(struct usb_fs_endpoint *fs_ep_uptr)
+{
+    struct usb_fs_endpoint fs_ep;
+    int error;
+
+    error = copyin(fs_ep_uptr, &fs_ep, sizeof(fs_ep));
+    if (error)
+        return (error);
+
+    fs_ep.status = USB_ERR_CANCELLED;
+    fs_ep.aFrames = 0;
+    fs_ep.isoc_time_complete = 0;
+
+    /* update "aFrames" */
+    error = copyout(&fs_ep.aFrames, &fs_ep_uptr->aFrames,
+        sizeof(fs_ep.aFrames));
+    if (error)
+        goto done;
+
+    /* update "isoc_time_complete" */
+    error = copyout(&fs_ep.isoc_time_complete,
+        &fs_ep_uptr->isoc_time_complete,
+        sizeof(fs_ep.isoc_time_complete));
+    if (error)
+        goto done;
+
+    /* update "status" */
+    error = copyout(&fs_ep.status, &fs_ep_uptr->status,
+        sizeof(fs_ep.status));
+done:
+    return (error);
+}
+
 static int
 ugen_fs_copy_out(struct usb_fifo *f, uint8_t ep_index)
 {
@@ -1249,7 +1283,12 @@ ugen_fs_copy_out(struct usb_fifo *f, uint8_t ep_index)
         return (EINVAL);
 
     mtx_lock(f->priv_mtx);
-    if (usbd_transfer_pending(xfer)) {
+    if (!xfer->flags_int.transferring &&
+        !xfer->flags_int.started) {
+        mtx_unlock(f->priv_mtx);
+        DPRINTF("Returning fake cancel event\n");
+        return (ugen_fs_copy_out_cancelled(f->fs_ep_ptr + ep_index));
+    } else if (usbd_transfer_pending(xfer)) {
         mtx_unlock(f->priv_mtx);
         return (EBUSY); /* should not happen */
     }
@@ -1370,6 +1409,7 @@ complete:
         sizeof(fs_ep.isoc_time_complete));
     if (error)
         goto done;
+
+    /* update "status" */
     error = copyout(&fs_ep.status, &fs_ep_uptr->status,
         sizeof(fs_ep.status));
@@ -1458,12 +1498,15 @@ ugen_ioctl(struct usb_fifo *f, u_long cmd, void *addr, int fflags)
         xfer = f->fs_xfer[u.pstart->ep_index];
         if (usbd_transfer_pending(xfer)) {
             usbd_transfer_stop(xfer);
+
             /*
              * Check if the USB transfer was stopped
-             * before it was even started. Else a cancel
-             * callback will be pending.
+             * before it was even started and fake a
+             * cancel event.
              */
-            if (!xfer->flags_int.transferring) {
+            if (!xfer->flags_int.transferring &&
+                !xfer->flags_int.started) {
+                DPRINTF("Issuing fake completion event\n");
                 ugen_fs_set_complete(xfer->priv_sc,
                     USB_P2U(xfer->priv_fifo));
             }

View File

@@ -5983,7 +5983,7 @@ _gone_in(int major, const char *msg)
     gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg);
     if (P_OSREL_MAJOR(__FreeBSD_version) >= major)
         printf("Obsolete code will removed soon: %s\n", msg);
-    else if (P_OSREL_MAJOR(__FreeBSD_version) + 1 == major)
+    else
         printf("Deprecated code (to be removed in FreeBSD %d): %s\n",
             major, msg);
 }
@@ -5996,7 +5996,7 @@ _gone_in_dev(device_t dev, int major, const char *msg)
     if (P_OSREL_MAJOR(__FreeBSD_version) >= major)
         device_printf(dev,
             "Obsolete code will removed soon: %s\n", msg);
-    else if (P_OSREL_MAJOR(__FreeBSD_version) + 1 == major)
+    else
        device_printf(dev,
            "Deprecated code (to be removed in FreeBSD %d): %s\n",
            major, msg);

View File

@@ -69,6 +69,8 @@ struct gtaskqueue_busy {
 static struct gtask * const TB_DRAIN_WAITER = (struct gtask *)0x1;
 
+typedef void (*gtaskqueue_enqueue_fn)(void *context);
+
 struct gtaskqueue {
     STAILQ_HEAD(, gtask) tq_queue;
     gtaskqueue_enqueue_fn tq_enqueue;
@@ -697,7 +699,7 @@ taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
         }
     }
     if (idx == -1)
-        panic("taskqgroup_find: Failed to pick a qid.");
+        panic("%s: failed to pick a qid.", __func__);
 
     return (idx);
 }
@@ -759,7 +761,8 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
         mtx_unlock(&qgroup->tqg_lock);
         error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
         if (error)
-            printf("%s: setaffinity failed for %s: %d\n", __func__, gtask->gt_name, error);
+            printf("%s: binding interrupt failed for %s: %d\n",
+                __func__, gtask->gt_name, error);
     } else
 #else /* __rtems__ */
     BSD_ASSERT(irq == -1);
@@ -789,16 +792,15 @@ taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
         error = intr_setaffinity(gtask->gt_irq, CPU_WHICH_IRQ, &mask);
         mtx_lock(&qgroup->tqg_lock);
         if (error)
-            printf("%s: %s setaffinity failed: %d\n", __func__, gtask->gt_name, error);
+            printf("%s: binding interrupt failed for %s: %d\n",
+                __func__, gtask->gt_name, error);
     }
 #else /* __rtems__ */
     BSD_ASSERT(gtask->gt_irq == -1);
 #endif /* __rtems__ */
 
     qgroup->tqg_queue[qid].tgc_cnt++;
-    LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
-        gt_list);
+    LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
     MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
     gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
     mtx_unlock(&qgroup->tqg_lock);
@@ -848,7 +850,8 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
     if (irq != -1 && tqg_smp_started) {
         error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
         if (error)
-            printf("%s: setaffinity failed: %d\n", __func__, error);
+            printf("%s: binding interrupt failed for %s: %d\n",
+                __func__, gtask->gt_name, error);
     }
 #else /* __rtems__ */
     BSD_ASSERT(irq == -1);
@@ -894,7 +897,8 @@ taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtas
     if (irq != -1) {
         error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
         if (error)
-            printf("%s: setaffinity failed: %d\n", __func__, error);
+            printf("%s: binding interrupt failed for %s: %d\n",
+                __func__, gtask->gt_name, error);
     }
 #else /* __rtems__ */
     BSD_ASSERT(irq == -1);
@@ -913,7 +917,7 @@ taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
         if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
             break;
     if (i == qgroup->tqg_cnt)
-        panic("taskqgroup_detach: task %s not in group\n", gtask->gt_name);
+        panic("%s: task %s not in group", __func__, gtask->gt_name);
     qgroup->tqg_queue[i].tgc_cnt--;
     LIST_REMOVE(gtask, gt_list);
     mtx_unlock(&qgroup->tqg_lock);
@@ -941,8 +945,7 @@ taskqgroup_binder(void *ctx)
     thread_unlock(curthread);
 
     if (error)
-        printf("%s: setaffinity failed: %d\n", __func__,
-            error);
+        printf("%s: binding curthread failed: %d\n", __func__, error);
 #else /* __rtems__ */
     sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(mask), &mask);
     if (sc != RTEMS_SUCCESSFUL)
@@ -1125,5 +1128,6 @@ taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
 void
 taskqgroup_config_gtask_deinit(struct grouptask *gtask)
 {
+
     taskqgroup_detach(qgroup_config, gtask);
 }

View File

@@ -590,6 +590,19 @@ sleepq_catch_signals(void *wchan, int pri)
         } else {
             mtx_unlock(&ps->ps_mtx);
         }
+
+        /*
+         * Do not go into sleep if this thread was the
+         * ptrace(2) attach leader. cursig() consumed
+         * SIGSTOP from PT_ATTACH, but we usually act
+         * on the signal by interrupting sleep, and
+         * should do that here as well.
+         */
+        if ((td->td_dbgflags & TDB_FSTP) != 0) {
+            if (ret == 0)
+                ret = EINTR;
+            td->td_dbgflags &= ~TDB_FSTP;
+        }
     }
 
     /*
      * Lock the per-process spinlock prior to dropping the PROC_LOCK

View File

@@ -79,7 +79,7 @@ typedef struct if_rxd_info {
     /* XXX redundant with the new irf_len field */
     uint16_t iri_len; /* packet length */
     qidx_t iri_cidx; /* consumer index of cq */
-    struct ifnet *iri_ifp; /* some drivers >1 interface per softc */
+    if_t iri_ifp; /* driver may have >1 iface per softc */
     /* updated by driver */
     if_rxd_frag_t iri_frags;

View File

@@ -63,10 +63,10 @@
 #define _CRYPTO_CRYPTO_H_
 
 #include <sys/ioccom.h>
+#include <sys/_task.h>
 
 #ifdef _KERNEL
 #include <opencrypto/_cryptodev.h>
-#include <sys/_task.h>
 #endif
 
 /* Some initial values */

View File

@@ -43,23 +43,23 @@ void counter_u64_zero(counter_u64_t);
 uint64_t counter_u64_fetch(counter_u64_t);
 
 #define COUNTER_ARRAY_ALLOC(a, n, wait) do { \
-    for (int i = 0; i < (n); i++) \
-        (a)[i] = counter_u64_alloc(wait); \
+    for (int _i = 0; _i < (n); _i++) \
+        (a)[_i] = counter_u64_alloc(wait); \
 } while (0)
 
 #define COUNTER_ARRAY_FREE(a, n) do { \
-    for (int i = 0; i < (n); i++) \
-        counter_u64_free((a)[i]); \
+    for (int _i = 0; _i < (n); _i++) \
+        counter_u64_free((a)[_i]); \
 } while (0)
 
 #define COUNTER_ARRAY_COPY(a, dstp, n) do { \
-    for (int i = 0; i < (n); i++) \
-        ((uint64_t *)(dstp))[i] = counter_u64_fetch((a)[i]);\
+    for (int _i = 0; _i < (n); _i++) \
+        ((uint64_t *)(dstp))[_i] = counter_u64_fetch((a)[_i]);\
 } while (0)
 
 #define COUNTER_ARRAY_ZERO(a, n) do { \
-    for (int i = 0; i < (n); i++) \
-        counter_u64_zero((a)[i]); \
+    for (int _i = 0; _i < (n); _i++) \
+        counter_u64_zero((a)[_i]); \
 } while (0)
 
 /*

View File

@@ -38,7 +38,6 @@
 #endif
 
 struct gtaskqueue;
-typedef void (*gtaskqueue_enqueue_fn)(void *context);
 
 /*
  * Taskqueue groups. Manages dynamic thread groups and irq binding for
@@ -55,28 +54,29 @@ void gtaskqueue_drain_all(struct gtaskqueue *queue);
 void grouptask_block(struct grouptask *grouptask);
 void grouptask_unblock(struct grouptask *grouptask);
 int grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *task);
 
 void taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *grptask,
     void *uniq, int irq, const char *name);
-int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *grptask,
-    void *uniq, int cpu, int irq, const char *name);
+int taskqgroup_attach_cpu(struct taskqgroup *qgroup,
+    struct grouptask *grptask, void *uniq, int cpu, int irq,
+    const char *name);
 void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask);
 struct taskqgroup *taskqgroup_create(const char *name);
 void taskqgroup_destroy(struct taskqgroup *qgroup);
 int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride);
-void taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
-    const char *name);
+void taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask,
+    gtask_fn_t *fn, const char *name);
 void taskqgroup_config_gtask_deinit(struct grouptask *gtask);
 
 #define TASK_ENQUEUED 0x1
 #define TASK_SKIP_WAKEUP 0x2
 #define TASK_NOENQUEUE 0x4
 
-#define GTASK_INIT(task, flags, priority, func, context) do { \
-    (task)->ta_flags = flags; \
-    (task)->ta_priority = (priority); \
-    (task)->ta_func = (func); \
-    (task)->ta_context = (context); \
+#define GTASK_INIT(gtask, flags, priority, func, context) do { \
+    (gtask)->ta_flags = flags; \
+    (gtask)->ta_priority = (priority); \
+    (gtask)->ta_func = (func); \
+    (gtask)->ta_context = (context); \
 } while (0)
 
 #define GROUPTASK_INIT(gtask, priority, func, context) \

View File

@@ -57,9 +57,10 @@
 #define M_NOVM 0x0200 /* don't ask VM for pages */
 #define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */
 #define M_NODUMP 0x0800 /* don't dump pages in this allocation */
-#define M_FIRSTFIT 0x1000 /* Only for vmem, fast fit. */
-#define M_BESTFIT 0x2000 /* Only for vmem, low fragmentation. */
-#define M_EXEC 0x4000 /* allocate executable space. */
+#define M_FIRSTFIT 0x1000 /* only for vmem, fast fit */
+#define M_BESTFIT 0x2000 /* only for vmem, low fragmentation */
+#define M_EXEC 0x4000 /* allocate executable space */
+#define M_NEXTFIT 0x8000 /* only for vmem, follow cursor */
 
 #define M_MAGIC 877983977 /* time when first defined :-) */

View File

@@ -170,7 +170,8 @@ struct vnode {
     u_int v_iflag; /* i vnode flags (see below) */
     u_int v_vflag; /* v vnode flags */
     u_int v_mflag; /* l mnt-specific vnode flags */
-    int v_writecount; /* v ref count of writers */
+    int v_writecount; /* I ref count of writers or
+                         (negative) text users */
     u_int v_hash;
     enum vtype v_type; /* u vnode type */
 };
@@ -245,7 +246,6 @@ struct xvnode {
 #define VV_NOSYNC 0x0004 /* unlinked, stop syncing */
 #define VV_ETERNALDEV 0x0008 /* device that is never destroyed */
 #define VV_CACHEDLABEL 0x0010 /* Vnode has valid cached MAC label */
-#define VV_TEXT 0x0020 /* vnode is a pure text prototype */
 #define VV_COPYONWRITE 0x0040 /* vnode is doing copy-on-write */
 #define VV_SYSTEM 0x0080 /* vnode being used by kernel */
 #define VV_PROCDEP 0x0100 /* vnode is process dependent */
@@ -658,8 +658,7 @@ void vgone(struct vnode *vp);
 void _vhold(struct vnode *, bool);
 void vinactive(struct vnode *, struct thread *);
 int vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo);
-int vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length,
-    int blksize);
+int vtruncbuf(struct vnode *vp, off_t length, int blksize);
 void vunref(struct vnode *);
 void vn_printf(struct vnode *vp, const char *fmt, ...) __printflike(2,3);
 int vrecycle(struct vnode *vp);
@@ -749,6 +748,7 @@ int vop_stdadvlock(struct vop_advlock_args *ap);
 int vop_stdadvlockasync(struct vop_advlockasync_args *ap);
 int vop_stdadvlockpurge(struct vop_advlockpurge_args *ap);
 int vop_stdallocate(struct vop_allocate_args *ap);
+int vop_stdset_text(struct vop_set_text_args *ap);
 int vop_stdpathconf(struct vop_pathconf_args *);
 int vop_stdpoll(struct vop_poll_args *);
 int vop_stdvptocnp(struct vop_vptocnp_args *ap);
@@ -827,6 +827,33 @@ void vop_rename_fail(struct vop_rename_args *ap);
 #define VOP_LOCK(vp, flags) VOP_LOCK1(vp, flags, __FILE__, __LINE__)
 
+#ifdef INVARIANTS
+#define VOP_ADD_WRITECOUNT_CHECKED(vp, cnt) \
+do { \
+    int error_; \
+ \
+    error_ = VOP_ADD_WRITECOUNT((vp), (cnt)); \
+    MPASS(error_ == 0); \
+} while (0)
+#define VOP_SET_TEXT_CHECKED(vp) \
+do { \
+    int error_; \
+ \
+    error_ = VOP_SET_TEXT((vp)); \
+    MPASS(error_ == 0); \
+} while (0)
+#define VOP_UNSET_TEXT_CHECKED(vp) \
+do { \
+    int error_; \
+ \
+    error_ = VOP_UNSET_TEXT((vp)); \
+    MPASS(error_ == 0); \
+} while (0)
+#else
+#define VOP_ADD_WRITECOUNT_CHECKED(vp, cnt) VOP_ADD_WRITECOUNT((vp), (cnt))
+#define VOP_SET_TEXT_CHECKED(vp) VOP_SET_TEXT((vp))
+#define VOP_UNSET_TEXT_CHECKED(vp) VOP_UNSET_TEXT((vp))
+#endif
 
 void vput(struct vnode *vp);
 void vrele(struct vnode *vp);

View File

@@ -176,9 +176,16 @@ static int boot_pages;
 static struct sx uma_drain_lock;
 
-/* kmem soft limit. */
+/*
+ * kmem soft limit, initialized by uma_set_limit(). Ensure that early
+ * allocations don't trigger a wakeup of the reclaim thread.
+ */
 static unsigned long uma_kmem_limit = LONG_MAX;
-static volatile unsigned long uma_kmem_total;
+SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
+    "UMA kernel memory soft limit");
+static unsigned long uma_kmem_total;
+SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
+    "UMA kernel memory usage");
 
 #ifndef __rtems__
 /* Is the VM done starting up? */
@@ -299,7 +306,7 @@ static void keg_small_init(uma_keg_t keg);
 static void keg_large_init(uma_keg_t keg);
 static void zone_foreach(void (*zfunc)(uma_zone_t));
 static void zone_timeout(uma_zone_t zone);
-static int hash_alloc(struct uma_hash *);
+static int hash_alloc(struct uma_hash *, u_int);
 static int hash_expand(struct uma_hash *, struct uma_hash *);
 static void hash_free(struct uma_hash *hash);
 static void uma_timeout(void *);
@@ -628,6 +635,7 @@ zone_domain_update_wss(uma_zone_domain_t zdom)
 static void
 keg_timeout(uma_keg_t keg)
 {
+    u_int slabs;
 
     KEG_LOCK(keg);
     /*
@@ -638,7 +646,8 @@ keg_timeout(uma_keg_t keg)
      * may be a little aggressive. Should I allow for two collisions max?
      */
     if (keg->uk_flags & UMA_ZONE_HASH &&
-        keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
+        (slabs = keg->uk_pages / keg->uk_ppera) >
+        keg->uk_hash.uh_hashsize) {
         struct uma_hash newhash;
         struct uma_hash oldhash;
         int ret;
@@ -649,9 +658,8 @@ keg_timeout(uma_keg_t keg)
          * I have to do everything in stages and check for
         * races.
         */
-        newhash = keg->uk_hash;
        KEG_UNLOCK(keg);
-        ret = hash_alloc(&newhash);
+        ret = hash_alloc(&newhash, 1 << fls(slabs));
        KEG_LOCK(keg);
        if (ret) {
            if (hash_expand(&keg->uk_hash, &newhash)) {
@@ -692,16 +700,13 @@ zone_timeout(uma_zone_t zone)
  * 1 on success and 0 on failure.
  */
 static int
-hash_alloc(struct uma_hash *hash)
+hash_alloc(struct uma_hash *hash, u_int size)
 {
-    u_int oldsize;
     size_t alloc;
 
-    oldsize = hash->uh_hashsize;
-
-    /* We're just going to go to a power of two greater */
-    if (oldsize) {
-        hash->uh_hashsize = oldsize * 2;
+    KASSERT(powerof2(size), ("hash size must be power of 2"));
+    if (size > UMA_HASH_SIZE_INIT) {
+        hash->uh_hashsize = size;
         alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
         hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
             M_UMAHASH, M_NOWAIT);
@@ -1353,9 +1358,9 @@ pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
         zkva += PAGE_SIZE;
     }
     return ((void*)addr);
 fail:
     TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
-        vm_page_unwire(p, PQ_NONE);
+        vm_page_unwire_noq(p);
         vm_page_free(p);
     }
     return (NULL);
@@ -1405,7 +1410,7 @@ noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
      * exit.
      */
     TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
-        vm_page_unwire(p, PQ_NONE);
+        vm_page_unwire_noq(p);
         vm_page_free(p);
     }
     return (NULL);
@@ -1475,7 +1480,7 @@ pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
     for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
         paddr = pmap_kextract(curva);
         m = PHYS_TO_VM_PAGE(paddr);
-        vm_page_unwire(m, PQ_NONE);
+        vm_page_unwire_noq(m);
         vm_page_free(m);
     }
     pmap_qremove(sva, size >> PAGE_SHIFT);
@@ -1820,7 +1825,7 @@ keg_ctor(void *mem, int size, void *udata, int flags)
     }
 
     if (keg->uk_flags & UMA_ZONE_HASH)
-        hash_alloc(&keg->uk_hash);
+        hash_alloc(&keg->uk_hash, 0);
 
     CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
         keg, zone->uz_name, zone,
@@ -4065,14 +4070,14 @@ unsigned long
 uma_size(void)
 {
-    return (uma_kmem_total);
+    return (atomic_load_long(&uma_kmem_total));
 }
 
 long
 uma_avail(void)
 {
-    return (uma_kmem_limit - uma_kmem_total);
+    return (uma_kmem_limit - uma_size());
 }
 
 void

View File

@@ -18,7 +18,6 @@
 #define icmp_stats _bsd_netstat_icmp_stats
 #define igmp_stats _bsd_netstat_igmp_stats
 #define inetname _bsd_netstat_inetname
-#define inetprint _bsd_netstat_inetprint
 #define ip_stats _bsd_netstat_ip_stats
 #define pim_stats _bsd_netstat_pim_stats
 #define protopr _bsd_netstat_protopr

View File

@@ -4348,6 +4348,7 @@
 #define USB_PRODUCT_SILABS_CP210X_3 0xea70 /* CP210x Serial */
 #define USB_PRODUCT_SILABS_CP210X_4 0xea80 /* CP210x Serial */
 #define USB_PRODUCT_SILABS_INFINITY_MIC 0xea71 /* Infinity GPS-MIC-1 Radio Monophone */
+#define USB_PRODUCT_SILABS_CP2112 0xea90 /* CP2112 HID USB-to-SMBus Bridge with GPIO */
 #define USB_PRODUCT_SILABS_USBSCOPE50 0xf001 /* USBscope50 */
 #define USB_PRODUCT_SILABS_USBWAVE12 0xf002 /* USBwave12 */
 #define USB_PRODUCT_SILABS_USBPULSE100 0xf003 /* USBpulse100 */

View File

@@ -16531,6 +16531,12 @@ const struct usb_knowndev usb_knowndevs[] = {
     "Silicon Labs",
     "Infinity GPS-MIC-1 Radio Monophone",
     },
+    {
+        USB_VENDOR_SILABS, USB_PRODUCT_SILABS_CP2112,
+        0,
+        "Silicon Labs",
+        "CP2112 HID USB-to-SMBus Bridge with GPIO",
+    },
     {
     USB_VENDOR_SILABS, USB_PRODUCT_SILABS_USBSCOPE50,
     0,