Mirror of https://git.rtems.org/rtems-libbsd/ (synced 2025-10-14 07:15:58 +08:00)
Update to FreeBSD head 2018-02-01
Git mirror commit d079ae0442af8fa3cfd6d7ede190d04e64a2c0d4. Update #3472.
@@ -128,7 +128,8 @@ typedef void (*uma_fini)(void *mem, int size);
 /*
  * Import new memory into a cache zone.
  */
-typedef int (*uma_import)(void *arg, void **store, int count, int flags);
+typedef int (*uma_import)(void *arg, void **store, int count, int domain,
+    int flags);
 
 /*
  * Free memory from a cache zone.
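As a hedged illustration of the new import contract (not from the patch; my_backing_alloc() and its pool are hypothetical), a conforming callback would now look like:

static int
my_zone_import(void *arg, void **store, int count, int domain, int flags)
{
	int i;

	for (i = 0; i < count; i++) {
		/* A real callback would honor 'domain' where it can,
		 * e.g. by drawing from a per-domain backing pool. */
		store[i] = my_backing_alloc(arg, domain, flags);
		if (store[i] == NULL)
			break;
	}
	return (i);	/* Number of items actually imported. */
}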
@@ -281,6 +282,10 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
 				 * Allocates mp_maxid + 1 slabs sized to
 				 * sizeof(struct pcpu).
 				 */
+#define UMA_ZONE_NUMA	0x10000	/*
+				 * NUMA aware Zone.  Implements a best
+				 * effort first-touch policy.
+				 */
 
 /*
  * These flags are shared between the keg and zone.  In zones wishing to add
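For orientation, the new flag is passed at zone creation time like any other UMA_ZONE_* flag; a minimal hedged sketch (the zone name and item type are made up):

/* Illustrative only: a first-touch NUMA-aware zone. */
myobj_zone = uma_zcreate("myobj", sizeof(struct myobj),
    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NUMA);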
@@ -325,6 +330,19 @@ void uma_zdestroy(uma_zone_t zone);
 
 void *uma_zalloc_arg(uma_zone_t zone, void *arg, int flags);
 
+/*
+ * Allocate an item from a specific NUMA domain.  This uses a slow path in
+ * the allocator but is guaranteed to allocate memory from the requested
+ * domain if M_WAITOK is set.
+ *
+ * Arguments:
+ *	zone   The zone we are allocating from
+ *	arg    This data is passed to the ctor function
+ *	domain The domain to allocate from.
+ *	flags  See sys/malloc.h for available flags.
+ */
+void *uma_zalloc_domain(uma_zone_t zone, void *arg, int domain, int flags);
+
 /*
  * Allocates an item out of a zone without supplying an argument
  *
@@ -353,6 +371,16 @@ uma_zalloc(uma_zone_t zone, int flags)
 
 void uma_zfree_arg(uma_zone_t zone, void *item, void *arg);
 
+/*
+ * Frees an item back to the specified zone's domain specific pool.
+ *
+ * Arguments:
+ *	zone  The zone the item was originally allocated out of.
+ *	item  The memory to be freed.
+ *	arg   Argument passed to the destructor
+ */
+void uma_zfree_domain(uma_zone_t zone, void *item, void *arg);
+
 /*
  * Frees an item back to a zone without supplying an argument
  *
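Together with uma_zalloc_domain() above, the pair can be used as in this hedged sketch (the zone and domain index are placeholders; with M_WAITOK the allocation is guaranteed to come from the requested domain):

item = uma_zalloc_domain(myobj_zone, NULL, 0 /* domain */, M_WAITOK);
/* ... use item ... */
uma_zfree_domain(myobj_zone, item, NULL);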
@@ -372,11 +400,6 @@ uma_zfree(uma_zone_t zone, void *item)
  */
 void uma_zwait(uma_zone_t zone);
 
-/*
- * XXX The rest of the prototypes in this header are h0h0 magic for the VM.
- * If you think you need to use it for a normal zone you're probably incorrect.
- */
-
 /*
  * Backend page supplier routines
  *
@@ -384,14 +407,15 @@ void uma_zwait(uma_zone_t zone);
 *	zone   The zone that is requesting pages.
 *	size   The number of bytes being requested.
 *	pflag  Flags for these memory pages, see below.
+ *	domain The NUMA domain that we prefer for this allocation.
 *	wait   Indicates our willingness to block.
 *
 * Returns:
 *	A pointer to the allocated memory or NULL on failure.
 */
 
-typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, uint8_t *pflag,
-    int wait);
+typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, int domain,
+    uint8_t *pflag, int wait);
 
 /*
  * Backend page free routines
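A hedged sketch of a backend supplier under the new typedef (my_page_alloc() stands in for whatever per-domain page source a zone author provides):

static void *
my_backend_alloc(uma_zone_t zone, vm_size_t size, int domain,
    uint8_t *pflag, int wait)
{
	/* Record how the pages were obtained; UMA hands this flag
	 * back to the backend free routine on release. */
	*pflag = UMA_SLAB_PRIV;
	return (my_page_alloc(domain, size, wait));
}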
@@ -406,8 +430,6 @@ typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, uint8_t *pflag,
  */
 typedef void (*uma_free)(void *item, vm_size_t size, uint8_t pflag);
 
-
-
 /*
  * Sets up the uma allocator. (Called by vm_mem_init)
  *
@@ -702,6 +724,14 @@ struct uma_percpu_stat {
 void uma_reclaim_wakeup(void);
 void uma_reclaim_worker(void *);
 
+unsigned long uma_limit(void);
+
+/* Return the amount of memory managed by UMA. */
+unsigned long uma_size(void);
+
+/* Return the amount of memory remaining.  May be negative. */
+long uma_avail(void);
+
 #ifdef __rtems__
 void rtems_uma_drain_timeout(void);
 #endif /* __rtems__ */
(File diff suppressed because it is too large.)
@@ -39,7 +39,22 @@
  */
 
 /*
  * Here's a quick description of the relationship between the objects:
+ *
+ * The brief summary;  Zones describe unique allocation types.  Zones are
+ * organized into per-CPU caches which are filled by buckets.  Buckets are
+ * organized according to memory domains.  Buckets are filled from kegs which
+ * are also organized according to memory domains.  Kegs describe a unique
+ * allocation type, backend memory provider, and layout.  Kegs are associated
+ * with one or more zones and zones reference one or more kegs.  Kegs provide
+ * slabs which are virtually contiguous collections of pages.  Each slab is
+ * broken down into one or more items that will satisfy an individual allocation.
+ *
+ * Allocation is satisfied in the following order:
+ *	1) Per-CPU cache
+ *	2) Per-domain cache of buckets
+ *	3) Slab from any of N kegs
+ *	4) Backend page provider
+ *
+ * More detail on individual objects is contained below:
  *
  * Kegs contain lists of slabs which are stored in either the full bin, empty
  * bin, or partially allocated bin, to reduce fragmentation.  They also contain
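The four-step order reads as a simple fall-through; as a hedged sketch only (the real logic lives in uma_core.c, e.g. uma_zalloc_arg() and keg_fetch_slab(); these helper names are invented):

item = from_percpu_cache(zone);			/* 1) per-CPU cache */
if (item == NULL)
	item = from_domain_bucket(zone, domain);	/* 2) per-domain buckets */
if (item == NULL)
	item = from_keg_slab(zone, domain);	/* 3) slab from a keg */
if (item == NULL)
	item = from_backend(zone, domain);	/* 4) backend page provider */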
@@ -47,6 +62,13 @@
 * and rsize is the result of that.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
+ * Keg slab lists are organized by memory domain to support NUMA allocation
+ * policies.  By default allocations are spread across domains to reduce the
+ * potential for hotspots.  Special keg creation flags may be specified to
+ * prefer location allocation.  However there is no strict enforcement as frees
+ * may happen on any CPU and these are returned to the CPU-local cache
+ * regardless of the originating domain.
+ *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
@@ -181,6 +203,17 @@ struct uma_cache {
 
 typedef struct uma_cache * uma_cache_t;
 
+/*
+ * Per-domain memory list.  Embedded in the kegs.
+ */
+struct uma_domain {
+	LIST_HEAD(,uma_slab)	ud_part_slab;	/* partially allocated slabs */
+	LIST_HEAD(,uma_slab)	ud_free_slab;	/* empty slab list */
+	LIST_HEAD(,uma_slab)	ud_full_slab;	/* full slabs */
+};
+
+typedef struct uma_domain * uma_domain_t;
+
 /*
  * Keg management structure
 *
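Illustrative only (keg lock handling omitted; 'keg' and 'd' are placeholders): a per-domain slab list is walked with the usual queue(3) macros, e.g.:

uma_domain_t dom = &keg->uk_domain[d];
uma_slab_t slab;

LIST_FOREACH(slab, &dom->ud_part_slab, us_link) {
	/* visit each partially allocated slab in domain d */
}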
@@ -192,10 +225,8 @@ struct uma_keg {
 	struct uma_hash	uk_hash;
 
 	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
-	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
-	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
-	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */
 
+	uint32_t	uk_cursor;	/* Domain alloc cursor. */
 	uint32_t	uk_align;	/* Alignment mask */
 	uint32_t	uk_pages;	/* Total page count */
 	uint32_t	uk_free;	/* Count of items free in slabs */
@@ -221,6 +252,9 @@ struct uma_keg {
 	/* Least used fields go to the last cache line. */
 	const char	*uk_name;		/* Name of creating zone. */
 	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */
+
+	/* Must be last, variable sized. */
+	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
 };
 typedef struct uma_keg	* uma_keg_t;
 
@@ -250,7 +284,7 @@ struct uma_slab {
 #endif
 	uint16_t	us_freecount;	/* How many are free? */
 	uint8_t		us_flags;	/* Page flags see uma.h */
-	uint8_t		us_pad;		/* Pad to 32bits, unused. */
+	uint8_t		us_domain;	/* Backing NUMA domain. */
 };
 
 #define	us_link	us_type._us_link
@@ -258,8 +292,12 @@ struct uma_slab {
 #define	us_size	us_type._us_size
 #endif /* __rtems__ */
 
+#if MAXMEMDOM >= 255
+#error "Slab domain type insufficient"
+#endif
+
 typedef struct uma_slab * uma_slab_t;
-typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);
+typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int, int);
 
 struct uma_klink {
 	LIST_ENTRY(uma_klink) kl_link;
@@ -267,6 +305,12 @@ struct uma_klink {
 };
 typedef struct uma_klink *uma_klink_t;
 
+struct uma_zone_domain {
+	LIST_HEAD(,uma_bucket)	uzd_buckets;	/* full buckets */
+};
+
+typedef struct uma_zone_domain * uma_zone_domain_t;
+
 /*
  * Zone management structure
 *
@@ -279,7 +323,7 @@ struct uma_zone {
 	const char	*uz_name;	/* Text name of the zone */
 
 	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
-	LIST_HEAD(,uma_bucket)	uz_buckets;	/* full buckets */
+	struct uma_zone_domain	*uz_domain;	/* per-domain buckets */
 
 	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
 	struct uma_klink	uz_klink;	/* klink for first keg. */
@@ -313,7 +357,9 @@ struct uma_zone {
 	 * This HAS to be the last item because we adjust the zone size
 	 * based on NCPU and then allocate the space for the zones.
 	 */
-	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
+	struct uma_cache	uz_cpu[];	/* Per cpu caches */
+
+	/* uz_domain follows here. */
 };
 
 /*
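With uz_cpu[] now a flexible array and uz_domain trailing it, the zone allocation size has to account for both; a hedged sketch of the arithmetic (the real computation is done during UMA startup in uma_core.c):

size_t zsize = sizeof(struct uma_zone) +
    (mp_maxid + 1) * sizeof(struct uma_cache) +
    vm_ndomains * sizeof(struct uma_zone_domain);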
@@ -344,6 +390,7 @@ zone_first_keg(uma_zone_t zone)
 /* Internal prototypes */
 static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
 void *uma_large_malloc(vm_size_t size, int wait);
+void *uma_large_malloc_domain(vm_size_t size, int domain, int wait);
 void uma_large_free(uma_slab_t slab);
 
 /* Lock Macros */
@@ -437,16 +484,12 @@ vsetslab(vm_offset_t va, uma_slab_t slab)
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
-void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
-    int wait);
+void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
+    uint8_t *pflag, int wait);
 void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
 
 /* Set a global soft limit on UMA managed memory. */
 void uma_set_limit(unsigned long limit);
-unsigned long uma_limit(void);
-
-/* Return the amount of memory managed by UMA. */
-unsigned long uma_size(void);
 #endif /* _KERNEL */
 
 #endif /* VM_UMA_INT_H */
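Machine-dependent implementations gain the domain argument as well; a hedged sketch for a direct-mapped platform (my_phys_alloc_domain() and MY_DMAP() are stand-ins, not real KPIs):

void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait)
{
	vm_paddr_t pa;

	*pflag = UMA_SLAB_PRIV;
	if (my_phys_alloc_domain(domain, wait, &pa) != 0)
		return (NULL);
	return (MY_DMAP(pa));	/* direct-map physical to virtual */
}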
@@ -146,6 +146,8 @@ extern void vm_ksubmap_init(struct kva_md_info *);
 
 extern int old_mlock;
 
+#define	vm_ndomains 1
+
 struct ucred;
 int swap_reserve(vm_ooffset_t incr);
 int swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred);
@@ -56,14 +56,21 @@ void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
 /* These operate on virtual addresses backed by memory. */
 vm_offset_t kmem_alloc_attr(struct vmem *, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
+vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags,
+    vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
 vm_offset_t kmem_alloc_contig(struct vmem *, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
+vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
+    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
+    vm_memattr_t memattr);
 vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
+vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
 void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
 
 /* This provides memory for previously allocated address space. */
 int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
+int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int);
 void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
 
 /* Bootstrapping. */
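A hedged usage sketch of the domain-explicit variants (domain 0 and the arena choice are placeholders; the caller must free to the arena the memory actually came from):

vm_offset_t va;

va = kmem_malloc_domain(0 /* domain */, PAGE_SIZE, M_WAITOK | M_ZERO);
if (va != 0) {
	/* ... use the memory ... */
	kmem_free(kernel_arena, va, PAGE_SIZE);
}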