Update to FreeBSD 9.2

Sebastian Huber
2013-11-06 16:20:21 +01:00
parent ce96623934
commit 66659ff1ad
596 changed files with 50781 additions and 19477 deletions

View File

@@ -248,6 +248,10 @@ int uma_zsecond_add(uma_zone_t zone, uma_zone_t master);
* backend pages and can fail early.
*/
#define UMA_ZONE_VTOSLAB 0x2000 /* Zone uses vtoslab for lookup. */
+#define UMA_ZONE_NODUMP 0x4000 /*
+* Zone's pages will not be included in
+* mini-dumps.
+*/
/*
* These flags are shared between the keg and zone. In zones wishing to add
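The new UMA_ZONE_NODUMP flag is consumed in keg_alloc_slab() below, where it becomes M_NODUMP on the slab's page allocation. A minimal sketch of how a consumer might opt a zone out of minidumps; the struct and zone name are hypothetical, not part of this commit:

#include <vm/uma.h>

struct foo {
	char payload[256];
};

static uma_zone_t foo_zone;	/* hypothetical zone */

static void
foo_zone_init(void)
{
	/* Pages backing this zone are omitted from minidumps. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}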
@@ -452,11 +456,12 @@ int uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int size);
*
* Arguments:
* zone The zone to limit
* nitems The requested upper limit on the number of items allowed
*
* Returns:
-* Nothing
+* int The effective value of nitems after rounding up based on page size
*/
-void uma_zone_set_max(uma_zone_t zone, int nitems);
+int uma_zone_set_max(uma_zone_t zone, int nitems);
+/*
+* Obtains the effective limit on the number of items in a zone
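Because the limit is rounded up to whole slabs (see uma_zone_set_max() in uma_core.c below), callers that care about the enforced value should keep the return value rather than the value they requested. A hedged sketch with a hypothetical zone:

static uma_zone_t bar_zone;	/* hypothetical */
static int bar_limit;

static void
bar_apply_limit(void)
{
	/*
	 * Ask for 1000 items; the effective limit may be larger,
	 * e.g. 1008 if a slab holds 84 items.
	 */
	bar_limit = uma_zone_set_max(bar_zone, 1000);
}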
@@ -623,7 +628,8 @@ struct uma_type_header {
u_int64_t uth_allocs; /* Zone: number of allocations. */
u_int64_t uth_frees; /* Zone: number of frees. */
u_int64_t uth_fails; /* Zone: number of alloc failures. */
-u_int64_t _uth_reserved1[3]; /* Reserved. */
+u_int64_t uth_sleeps; /* Zone: number of alloc sleeps. */
+u_int64_t _uth_reserved1[2]; /* Reserved. */
};
struct uma_percpu_stat {
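Note how uth_sleeps is carved out of the reserved array: one new counter plus two remaining slots equals the old three slots, so sizeof(struct uma_type_header) and the sysctl stream layout seen by consumers such as libmemstat(3) are unchanged. The invariant as a compile-time check (illustrative only, not in the commit):

#include <sys/types.h>
#include <sys/systm.h>	/* CTASSERT() */

CTASSERT(sizeof(u_int64_t) + sizeof(u_int64_t [2]) == sizeof(u_int64_t [3]));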

View File

@@ -88,8 +88,6 @@ __FBSDID("$FreeBSD$");
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>
#include <machine/vmparam.h>
#include <ddb/ddb.h>
#ifdef __rtems__
@@ -117,7 +115,7 @@ static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
static uma_zone_t hashzone;
/* The boot-time adjusted value for cache line alignment. */
-static int uma_align_cache = 64 - 1;
+int uma_align_cache = 64 - 1;
static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
@@ -144,6 +142,8 @@ static struct mtx uma_boot_pages_mtx;
/* Is the VM done starting up? */
static int booted = 0;
+#define UMA_STARTUP 1
+#define UMA_STARTUP2 2
#endif /* __rtems__ */
/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
@@ -862,6 +862,9 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
else
wait &= ~M_ZERO;
+if (keg->uk_flags & UMA_ZONE_NODUMP)
+wait |= M_NODUMP;
/* zone is passed for legacy reasons. */
mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
if (mem == NULL) {
@@ -990,7 +993,7 @@ startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
return (tmps->us_data);
}
mtx_unlock(&uma_boot_pages_mtx);
-if (booted == 0)
+if (booted < UMA_STARTUP2)
panic("UMA: Increase vm.boot_pages");
/*
* Now that we've booted reset these users to their real allocator.
@@ -1072,10 +1075,8 @@ obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
while (pages != startpages) {
pages--;
p = TAILQ_LAST(&object->memq, pglist);
-vm_page_lock_queues();
vm_page_unwire(p, 0);
vm_page_free(p);
-vm_page_unlock_queues();
}
retkva = 0;
goto done;
@@ -1360,11 +1361,20 @@ keg_ctor(void *mem, int size, void *udata, int flags)
#ifdef UMA_MD_SMALL_ALLOC
keg->uk_allocf = uma_small_alloc;
keg->uk_freef = uma_small_free;
+#ifndef __rtems__
+if (booted < UMA_STARTUP)
+keg->uk_allocf = startup_alloc;
+#endif /* __rtems__ */
+#else
+#ifndef __rtems__
+if (booted < UMA_STARTUP2)
+keg->uk_allocf = startup_alloc;
+#endif /* __rtems__ */
#endif
#ifndef __rtems__
-if (booted == 0)
-keg->uk_allocf = startup_alloc;
-} else if (booted == 0 && (keg->uk_flags & UMA_ZFLAG_INTERNAL))
+} else if (booted < UMA_STARTUP2 &&
+(keg->uk_flags & UMA_ZFLAG_INTERNAL))
keg->uk_allocf = startup_alloc;
#else /* __rtems__ */
}
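The ladder above encodes the two-stage boot: before uma_startup() has run (booted < UMA_STARTUP) every keg must use startup_alloc(); with a machine-dependent small-alloc backend a keg can switch to uma_small_alloc() after stage one, while all other kegs stay on startup_alloc() until uma_startup2() marks the VM usable (UMA_STARTUP2). A host-runnable restatement of the selection rule, with all names hypothetical and page_alloc standing in for keg_ctor's default allocator:

#include <stdio.h>

#define UMA_STARTUP	1
#define UMA_STARTUP2	2

static const char *
keg_pick_allocf(int booted, int md_small_alloc)
{
	if (md_small_alloc)
		return (booted < UMA_STARTUP ? "startup_alloc" : "uma_small_alloc");
	return (booted < UMA_STARTUP2 ? "startup_alloc" : "page_alloc");
}

int
main(void)
{
	printf("%s\n", keg_pick_allocf(0, 1));			/* startup_alloc */
	printf("%s\n", keg_pick_allocf(UMA_STARTUP, 1));	/* uma_small_alloc */
	printf("%s\n", keg_pick_allocf(UMA_STARTUP, 0));	/* startup_alloc */
	return (0);
}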
@@ -1463,6 +1473,7 @@ zone_ctor(void *mem, int size, void *udata, int flags)
zone->uz_allocs = 0;
zone->uz_frees = 0;
zone->uz_fails = 0;
+zone->uz_sleeps = 0;
zone->uz_fills = zone->uz_count = 0;
zone->uz_flags = 0;
keg = arg->keg;
@@ -1820,9 +1831,9 @@ uma_startup(void *bootmem, int boot_pages)
bucket_init();
-#if defined(UMA_MD_SMALL_ALLOC) && !defined(UMA_MD_SMALL_ALLOC_NEEDS_VM)
-booted = 1;
-#endif
+#ifndef __rtems__
+booted = UMA_STARTUP;
+#endif /* __rtems__ */
#ifdef UMA_DEBUG
printf("UMA startup complete.\n");
@@ -1846,7 +1857,7 @@ SYSINIT(rtems_bsd_uma_startup, SI_SUB_VM, SI_ORDER_FIRST,
void
uma_startup2(void)
{
-booted = 1;
+booted = UMA_STARTUP2;
bucket_enable();
#ifdef UMA_DEBUG
printf("UMA startup2 complete.\n");
@@ -2245,6 +2256,7 @@ keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
zone->uz_flags |= UMA_ZFLAG_FULL;
if (flags & M_NOWAIT)
break;
+zone->uz_sleeps++;
msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
continue;
}
@@ -2388,6 +2400,7 @@ zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
*/
if (full && !empty) {
zone->uz_flags |= UMA_ZFLAG_FULL;
+zone->uz_sleeps++;
msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
zone->uz_flags &= ~UMA_ZFLAG_FULL;
continue;
@@ -2894,7 +2907,7 @@ zone_free_item(uma_zone_t zone, void *item, void *udata,
}
/* See uma.h */
-void
+int
uma_zone_set_max(uma_zone_t zone, int nitems)
{
uma_keg_t keg;
@@ -2904,8 +2917,10 @@ uma_zone_set_max(uma_zone_t zone, int nitems)
keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
if (keg->uk_maxpages * keg->uk_ipers < nitems)
keg->uk_maxpages += keg->uk_ppera;
+nitems = keg->uk_maxpages * keg->uk_ipers;
ZONE_UNLOCK(zone);
+return (nitems);
}
/* See uma.h */
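A worked example of the rounding performed above, with a hypothetical keg geometry of 84 items per slab (uk_ipers) and one page per slab (uk_ppera); runnable in userland:

#include <assert.h>

int
main(void)
{
	int ipers = 84, ppera = 1;	/* hypothetical keg geometry */
	int nitems = 1000;		/* requested limit */
	int maxpages;

	maxpages = (nitems / ipers) * ppera;	/* 11 pages -> 924 items */
	if (maxpages * ipers < nitems)
		maxpages += ppera;		/* round up to 12 pages */
	nitems = maxpages * ipers;		/* 1008, the value returned */
	assert(nitems == 1008);
	return (0);
}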
@@ -3040,13 +3055,11 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
if (kva == 0)
return (0);
-if (obj == NULL) {
-obj = vm_object_allocate(OBJT_DEFAULT,
-pages);
-} else {
+if (obj == NULL)
+obj = vm_object_allocate(OBJT_PHYS, pages);
+else {
VM_OBJECT_LOCK_INIT(obj, "uma object");
-_vm_object_allocate(OBJT_DEFAULT,
-pages, obj);
+_vm_object_allocate(OBJT_PHYS, pages, obj);
}
ZONE_LOCK(zone);
keg->uk_kva = kva;
@@ -3257,13 +3270,13 @@ uma_print_zone(uma_zone_t zone)
*/
static void
uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
-u_int64_t *freesp)
+u_int64_t *freesp, u_int64_t *sleepsp)
{
uma_cache_t cache;
-u_int64_t allocs, frees;
+u_int64_t allocs, frees, sleeps;
int cachefree, cpu;
-allocs = frees = 0;
+allocs = frees = sleeps = 0;
cachefree = 0;
CPU_FOREACH(cpu) {
cache = &z->uz_cpu[cpu];
@@ -3276,12 +3289,15 @@ uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
}
allocs += z->uz_allocs;
frees += z->uz_frees;
+sleeps += z->uz_sleeps;
if (cachefreep != NULL)
*cachefreep = cachefree;
if (allocsp != NULL)
*allocsp = allocs;
if (freesp != NULL)
*freesp = frees;
+if (sleepsp != NULL)
+*sleepsp = sleeps;
}
#endif /* DDB */
@@ -3315,36 +3331,19 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
uma_keg_t kz;
uma_zone_t z;
uma_keg_t k;
-char *buffer;
-int buflen, count, error, i;
+int count, error, i;
+error = sysctl_wire_old_buffer(req, 0);
+if (error != 0)
+return (error);
+sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
-mtx_lock(&uma_mtx);
-restart:
-mtx_assert(&uma_mtx, MA_OWNED);
count = 0;
+mtx_lock(&uma_mtx);
LIST_FOREACH(kz, &uma_kegs, uk_link) {
LIST_FOREACH(z, &kz->uk_zones, uz_link)
count++;
}
-mtx_unlock(&uma_mtx);
-buflen = sizeof(ush) + count * (sizeof(uth) + sizeof(ups) *
-(mp_maxid + 1)) + 1;
-buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
-mtx_lock(&uma_mtx);
-i = 0;
-LIST_FOREACH(kz, &uma_kegs, uk_link) {
-LIST_FOREACH(z, &kz->uk_zones, uz_link)
-i++;
-}
-if (i > count) {
-free(buffer, M_TEMP);
-goto restart;
-}
-count = i;
-sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);
/*
* Insert stream header.
@@ -3353,11 +3352,7 @@ restart:
ush.ush_version = UMA_STREAM_VERSION;
ush.ush_maxcpus = (mp_maxid + 1);
ush.ush_count = count;
-if (sbuf_bcat(&sbuf, &ush, sizeof(ush)) < 0) {
-mtx_unlock(&uma_mtx);
-error = ENOMEM;
-goto out;
-}
+(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
LIST_FOREACH(kz, &uma_kegs, uk_link) {
LIST_FOREACH(z, &kz->uk_zones, uz_link) {
@@ -3389,12 +3384,8 @@ restart:
uth.uth_allocs = z->uz_allocs;
uth.uth_frees = z->uz_frees;
uth.uth_fails = z->uz_fails;
-if (sbuf_bcat(&sbuf, &uth, sizeof(uth)) < 0) {
-ZONE_UNLOCK(z);
-mtx_unlock(&uma_mtx);
-error = ENOMEM;
-goto out;
-}
+uth.uth_sleeps = z->uz_sleeps;
+(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
/*
* While it is not normally safe to access the cache
* bucket pointers while not on the CPU that owns the
@@ -3419,53 +3410,47 @@ restart:
ups.ups_allocs = cache->uc_allocs;
ups.ups_frees = cache->uc_frees;
skip:
-if (sbuf_bcat(&sbuf, &ups, sizeof(ups)) < 0) {
-ZONE_UNLOCK(z);
-mtx_unlock(&uma_mtx);
-error = ENOMEM;
-goto out;
-}
+(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
}
ZONE_UNLOCK(z);
}
}
mtx_unlock(&uma_mtx);
-sbuf_finish(&sbuf);
-error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
-out:
-free(buffer, M_TEMP);
+error = sbuf_finish(&sbuf);
+sbuf_delete(&sbuf);
return (error);
}
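This is the stock conversion to sbuf_new_for_sysctl(9): the sbuf drains directly into the wired request buffer, so the count/malloc/restart dance and the per-write ENOMEM branches all disappear. The shape of the pattern, reduced to a hypothetical handler:

#include <sys/types.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>

static int
sysctl_example(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "value: %d\n", 42);	/* emit whatever is needed */
	error = sbuf_finish(&sbuf);		/* reports any drain error */
	sbuf_delete(&sbuf);
	return (error);
}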
#ifdef DDB
DB_SHOW_COMMAND(uma, db_show_uma)
{
-u_int64_t allocs, frees;
+u_int64_t allocs, frees, sleeps;
uma_bucket_t bucket;
uma_keg_t kz;
uma_zone_t z;
int cachefree;
db_printf("%18s %8s %8s %8s %12s\n", "Zone", "Size", "Used", "Free",
"Requests");
db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
"Requests", "Sleeps");
LIST_FOREACH(kz, &uma_kegs, uk_link) {
LIST_FOREACH(z, &kz->uk_zones, uz_link) {
if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
allocs = z->uz_allocs;
frees = z->uz_frees;
+sleeps = z->uz_sleeps;
cachefree = 0;
} else
uma_zone_sumstat(z, &cachefree, &allocs,
-&frees);
+&frees, &sleeps);
if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
(LIST_FIRST(&kz->uk_zones) != z)))
cachefree += kz->uk_free;
LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
cachefree += bucket->ub_cnt;
db_printf("%18s %8ju %8jd %8d %12ju\n", z->uz_name,
db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name,
(uintmax_t)kz->uk_size,
(intmax_t)(allocs - frees), cachefree,
-(uintmax_t)allocs);
+(uintmax_t)allocs, sleeps);
if (db_pager_quit)
return;
}
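The widened format strings keep the new Sleeps column aligned with the header. The same formats in a host program, with made-up numbers, show the layout "show uma" now prints:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
	    "Requests", "Sleeps");
	/* One sample row; the values are hypothetical. */
	printf("%18s %8ju %8jd %8d %12ju %8ju\n", "mbuf",
	    (uintmax_t)256, (intmax_t)1024, 128, (uintmax_t)100000,
	    (uintmax_t)3);
	return (0);
}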

View File

@@ -45,7 +45,7 @@
*
* The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
* be allocated off the page from a special slab zone. The free list within a
-* slab is managed with a linked list of indexes, which are 8 bit values. If
+* slab is managed with a linked list of indices, which are 8 bit values. If
* UMA_SLAB_SIZE is defined to be too large I will have to switch to 16bit
* values. Currently on alpha you can get 250 or so 32 byte items and on x86
* you can get 250 or so 16byte items. For item sizes that would yield more
@@ -56,9 +56,9 @@
* wasted between items due to alignment problems. This may yield a much better
* memory footprint for certain sizes of objects. Another alternative is to
* increase the UMA_SLAB_SIZE, or allow for dynamic slab sizes. I prefer
-* dynamic slab sizes because we could stick with 8 bit indexes and only use
+* dynamic slab sizes because we could stick with 8 bit indices and only use
* large slab sizes for zones with a lot of waste per slab. This may create
-* ineffeciencies in the vm subsystem due to fragmentation in the address space.
+* inefficiencies in the vm subsystem due to fragmentation in the address space.
*
* The only really gross cases, with regards to memory waste, are for those
* items that are just over half the page size. You can get nearly 50% waste,
@@ -118,7 +118,7 @@
#define UMA_SLAB_MASK (PAGE_SIZE - 1) /* Mask to get back to the page */
#define UMA_SLAB_SHIFT PAGE_SHIFT /* Number of bits PAGE_MASK */
-#define UMA_BOOT_PAGES 48 /* Pages allocated for startup */
+#define UMA_BOOT_PAGES 64 /* Pages allocated for startup */
/* Max waste before going to off page slab management */
#define UMA_MAX_WASTE (UMA_SLAB_SIZE / 10)
@@ -159,6 +159,15 @@ struct uma_hash {
int uh_hashmask; /* Mask used during hashing */
};
+/*
+* align field or structure to cache line
+*/
+#if defined(__amd64__)
+#define UMA_ALIGN __aligned(CACHE_LINE_SIZE)
+#else
+#define UMA_ALIGN
+#endif
/*
* Structures for per cpu queues.
*/
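UMA_ALIGN pads and aligns the per-CPU caches and hot zone fields to cache line boundaries so that counters belonging to different CPUs never share a line (false sharing); presumably it is enabled only on amd64 here to avoid the padding cost elsewhere. The underlying idiom in a hedged, standalone form:

#include <sys/cdefs.h>
#include <sys/types.h>

#define MY_CACHE_LINE_SIZE 64	/* stand-in for CACHE_LINE_SIZE */

struct percpu_counter {
	u_int64_t allocs;
	u_int64_t frees;
} __aligned(MY_CACHE_LINE_SIZE);

/* Each element starts on its own cache line; updates do not collide. */
static struct percpu_counter counters[4];	/* one per CPU */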
@@ -177,7 +186,7 @@ struct uma_cache {
uma_bucket_t uc_allocbucket; /* Bucket to allocate from */
u_int64_t uc_allocs; /* Count of allocations */
u_int64_t uc_frees; /* Count of frees */
-};
+} UMA_ALIGN;
typedef struct uma_cache * uma_cache_t;
@@ -312,11 +321,13 @@ struct uma_zone {
uma_init uz_init; /* Initializer for each item */
uma_fini uz_fini; /* Discards memory */
-u_int64_t uz_allocs; /* Total number of allocations */
-u_int64_t uz_frees; /* Total number of frees */
-u_int64_t uz_fails; /* Total number of alloc failures */
u_int32_t uz_flags; /* Flags inherited from kegs */
u_int32_t uz_size; /* Size inherited from kegs */
+u_int64_t uz_allocs UMA_ALIGN; /* Total number of allocations */
+u_int64_t uz_frees; /* Total number of frees */
+u_int64_t uz_fails; /* Total number of alloc failures */
+u_int64_t uz_sleeps; /* Total number of alloc sleeps */
uint16_t uz_fills; /* Outstanding bucket fills */
uint16_t uz_count; /* Highest value ub_ptr can have */
@@ -324,7 +335,7 @@ struct uma_zone {
* This HAS to be the last item because we adjust the zone size
* based on NCPU and then allocate the space for the zones.
*/
-struct uma_cache uz_cpu[1]; /* Per cpu caches */
+struct uma_cache uz_cpu[1]; /* Per cpu caches */
};
/*
@@ -341,6 +352,8 @@ struct uma_zone {
#define UMA_ZFLAG_INHERIT (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | \
UMA_ZFLAG_BUCKET)
+#undef UMA_ALIGN
#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);

View File

@@ -76,14 +76,14 @@ typedef u_char vm_prot_t; /* protection codes */
#define VM_PROT_READ ((vm_prot_t) 0x01)
#define VM_PROT_WRITE ((vm_prot_t) 0x02)
#define VM_PROT_EXECUTE ((vm_prot_t) 0x04)
-#define VM_PROT_OVERRIDE_WRITE ((vm_prot_t) 0x08) /* copy-on-write */
+#define VM_PROT_COPY ((vm_prot_t) 0x08) /* copy-on-read */
#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
#define VM_PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
#define VM_PROT_DEFAULT VM_PROT_ALL
enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_PHYS,
-OBJT_DEAD, OBJT_SG };
+OBJT_DEAD, OBJT_SG, OBJT_MGTDEVICE };
typedef u_char objtype_t;
union vm_map_object;
@@ -136,17 +136,21 @@ struct kva_md_info {
vm_offset_t clean_eva;
vm_offset_t pager_sva;
vm_offset_t pager_eva;
+vm_offset_t bio_transient_sva;
+vm_offset_t bio_transient_eva;
};
extern struct kva_md_info kmi;
extern void vm_ksubmap_init(struct kva_md_info *);
-struct uidinfo;
+extern int old_mlock;
+struct ucred;
int swap_reserve(vm_ooffset_t incr);
-int swap_reserve_by_uid(vm_ooffset_t incr, struct uidinfo *uip);
+int swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred);
void swap_reserve_force(vm_ooffset_t incr);
void swap_release(vm_ooffset_t decr);
-void swap_release_by_uid(vm_ooffset_t decr, struct uidinfo *uip);
+void swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred);
#endif /* VM_H */

View File

@@ -63,8 +63,14 @@ void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
vm_ooffset_t *);
+int vm_fault_disable_pagefaults(void);
+void vm_fault_enable_pagefaults(int save);
+#ifndef __rtems__
+int vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
+int fault_flags, vm_page_t *m_hold);
+int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
+vm_prot_t prot, vm_page_t *ma, int max_count);
+#endif /* __rtems__ */
void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
-int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
+int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
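vm_fault_quick_hold_pages() (excluded from the RTEMS build by the new guard) gives kernel code short-term, fault-free access to user pages, e.g. around I/O. A hedged sketch of typical use, with hypothetical function name and buffer parameters:

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

static int
with_held_pages(vm_offset_t uaddr, vm_size_t len)
{
	vm_page_t ma[16];
	int count;

	count = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
	    uaddr, len, VM_PROT_READ | VM_PROT_WRITE, ma, 16);
	if (count == -1)
		return (EFAULT);
	/* ... access the held pages via ma[0 .. count-1] ... */
	vm_page_unhold_pages(ma, count);
	return (0);
}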
@@ -100,14 +106,9 @@ vsunlock(void *addr, size_t len)
(void) len;
}
#endif /* __rtems__ */
-void vm_object_print(/* db_expr_t */ long, boolean_t, /* db_expr_t */ long,
-char *);
-int vm_fault_quick(caddr_t v, int prot);
struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
void vm_imgact_unmap_page(struct sf_buf *sf);
void vm_thread_dispose(struct thread *td);
int vm_thread_new(struct thread *td, int pages);
void vm_thread_swapin(struct thread *td);
void vm_thread_swapout(struct thread *td);
#endif /* _KERNEL */
#endif /* !_VM_EXTERN_H_ */