ZONE(9): Fix slab flag handling

Disable unused slab flags.
Sebastian Huber 2015-09-11 11:49:47 +02:00
parent 4c7d385f06
commit 495a768f99
3 changed files with 13 additions and 3 deletions
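The slab flags changed below form a small contract between a zone's back-end allocator (the allocf) and its release routine (the freef): whatever the allocf stores through its flags output parameter is handed back unchanged to the freef, which can branch on it to choose the matching release path. A minimal sketch of that contract, assuming nothing beyond what the hunks below show (the sketch_* names are hypothetical; UMA_SLAB_KERNEL, the callback signatures, and the free(mem, M_TEMP) pairing are taken from this commit):

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

/* allocf: obtain backing memory and tag it so the freef knows its origin. */
static void *
sketch_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{
	*flags = UMA_SLAB_KERNEL;
	return (malloc(bytes, M_TEMP, wait));
}

/* freef: receives the flags exactly as the allocf set them. */
static void
sketch_free(void *mem, int size, uint8_t flags)
{
	if (flags & UMA_SLAB_KERNEL)
		free(mem, M_TEMP);
}

A zone opts into such a pair with uma_zone_set_allocf() and uma_zone_set_freef(); the prototype of the latter appears as context in the uma.h hunk below.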

@@ -425,9 +425,9 @@ static void *
 mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
 {
-#ifndef __rtems__
 	/* Inform UMA that this allocator uses kernel_map/object. */
 	*flags = UMA_SLAB_KERNEL;
+#ifndef __rtems__
 	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
 	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
 #else /* __rtems__ */
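As a usage aside, a hedged sketch (not part of this commit) of how a zone gets wired to a custom allocf/freef pair like mbuf_jumbo_alloc above; the zone name, item size and example_* callbacks are made up, while uma_zcreate(), uma_zone_set_allocf() and uma_zone_set_freef() are the regular UMA interfaces:

#include <vm/uma.h>

/* Callbacks would be written along the lines of mbuf_jumbo_alloc above. */
static void *example_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait);
static void example_free(void *mem, int size, uint8_t flags);

static uma_zone_t example_zone;

static void
example_zone_setup(void)
{
	example_zone = uma_zcreate("example", 4096, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(example_zone, example_alloc);
	uma_zone_set_freef(example_zone, example_free);
}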

@@ -542,13 +542,17 @@ void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
 /*
  * These flags are setable in the allocf and visible in the freef.
  */
+#ifndef __rtems__
 #define UMA_SLAB_BOOT	0x01		/* Slab alloced from boot pages */
 #define UMA_SLAB_KMEM	0x02		/* Slab alloced from kmem_map */
+#endif /* __rtems__ */
 #define UMA_SLAB_KERNEL	0x04		/* Slab alloced from kernel_map */
+#ifndef __rtems__
 #define UMA_SLAB_PRIV	0x08		/* Slab alloced from priv allocator */
 #define UMA_SLAB_OFFP	0x10		/* Slab is managed separately */
 #define UMA_SLAB_MALLOC	0x20		/* Slab is a large malloc slab */
 /* 0x40 and 0x80 are available */
+#endif /* __rtems__ */
 
 /*
  * Used to pre-fill a zone with some number of items

@@ -722,11 +722,13 @@ keg_drain(uma_keg_t keg)
 	while (slab) {
 		n = LIST_NEXT(slab, us_link);
 
+#ifndef __rtems__
 		/* We have no where to free these to */
 		if (slab->us_flags & UMA_SLAB_BOOT) {
 			slab = n;
 			continue;
 		}
+#endif /* __rtems__ */
 
 		LIST_REMOVE(slab, us_link);
 		keg->uk_pages -= keg->uk_ppera;
@@ -1023,10 +1025,11 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 {
 	void *p;	/* Returned page */
 
-	*pflag = UMA_SLAB_KMEM;
 #ifndef __rtems__
+	*pflag = UMA_SLAB_KMEM;
 	p = (void *) kmem_malloc(kmem_map, bytes, wait);
 #else /* __rtems__ */
+	*pflag = 0;
 	p = rtems_bsd_page_alloc(bytes, wait);
 #endif /* __rtems__ */
@@ -1121,7 +1124,10 @@ page_free(void *mem, int size, u_int8_t flags)
 	kmem_free(map, (vm_offset_t)mem, size);
 #else /* __rtems__ */
-	rtems_bsd_page_free(mem);
+	if (flags & UMA_SLAB_KERNEL)
+		free(mem, M_TEMP);
+	else
+		rtems_bsd_page_free(mem);
 #endif /* __rtems__ */
 }
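Taken together, UMA_SLAB_KERNEL is the only slab flag left defined for the RTEMS build: mbuf_jumbo_alloc now reports it unconditionally, page_alloc reports no flag at all on RTEMS (UMA_SLAB_KMEM is compiled out), and page_free uses it to send such memory back through free(mem, M_TEMP) instead of rtems_bsd_page_free(), presumably matching how the RTEMS branch of the jumbo allocator obtained the memory in the first place.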