ZONE(9): Add and use red-black tree chunk alloc

This commit is contained in:
Sebastian Huber 2013-10-25 15:09:17 +02:00
parent 4adeb59b18
commit 3e2938873d
8 changed files with 292 additions and 139 deletions

View File

@ -52,6 +52,7 @@ LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-autoconf.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-bus-dma.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-bus-dma-mbuf.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-cam.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-chunk.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-condvar.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-conf.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-copyinout.c
@ -67,7 +68,6 @@ LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-malloc.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-mutex.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-newproc.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-nexus.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-page.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-panic.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-pci_bus.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-pci_cfgreg.c

View File

@ -609,6 +609,7 @@ rtems.addRTEMSSourceFiles(
'rtems/rtems-bsd-bus-dma.c',
'rtems/rtems-bsd-bus-dma-mbuf.c',
'rtems/rtems-bsd-cam.c',
'rtems/rtems-bsd-chunk.c',
'rtems/rtems-bsd-condvar.c',
'rtems/rtems-bsd-conf.c',
'rtems/rtems-bsd-copyinout.c',
@ -624,7 +625,6 @@ rtems.addRTEMSSourceFiles(
'rtems/rtems-bsd-mutex.c',
'rtems/rtems-bsd-newproc.c',
'rtems/rtems-bsd-nexus.c',
'rtems/rtems-bsd-page.c',
'rtems/rtems-bsd-panic.c',
'rtems/rtems-bsd-pci_bus.c',
'rtems/rtems-bsd-pci_cfgreg.c',

View File

@ -84,11 +84,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#ifdef __rtems__
void *rtems_page_alloc(int bytes);
void *rtems_page_find( void *address );
void rtems_page_free( void *address );
#endif /* __rtems__ */
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>
@ -97,6 +92,9 @@ void rtems_page_free( void *address );
#include <ddb/ddb.h>
#ifdef __rtems__
rtems_bsd_chunk_control rtems_bsd_uma_chunks;
#endif /* __rtems__ */
/*
* This is the zone and keg from which all zones are spawned. The idea is that
* even the zone & keg heads are allocated from the allocator, so we use the
@ -882,10 +880,12 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
slab = (uma_slab_t )(mem + keg->uk_pgoff);
#ifndef __rtems__
if (keg->uk_flags & UMA_ZONE_VTOSLAB)
#ifndef __rtems__
for (i = 0; i < keg->uk_ppera; i++)
vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
#else /* __rtems__ */
vsetslab((vm_offset_t)mem, slab);
#endif /* __rtems__ */
slab->us_keg = keg;
@ -1028,7 +1028,7 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
#ifndef __rtems__
p = (void *) kmem_malloc(kmem_map, bytes, wait);
#else /* __rtems__ */
p = rtems_page_alloc(bytes);
p = rtems_bsd_chunk_alloc(&rtems_bsd_uma_chunks, bytes);
#endif /* __rtems__ */
return (p);
@ -1124,7 +1124,7 @@ page_free(void *mem, int size, u_int8_t flags)
kmem_free(map, (vm_offset_t)mem, size);
#else /* __rtems__ */
rtems_page_free( mem );
rtems_bsd_chunk_free(&rtems_bsd_uma_chunks, mem);
#endif /* __rtems__ */
}
@ -1277,9 +1277,7 @@ keg_cachespread_init(uma_keg_t keg)
keg->uk_rsize = rsize;
keg->uk_ppera = pages;
keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
#ifndef __rtems__
keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
#endif /* __rtems__ */
KASSERT(keg->uk_ipers <= uma_max_ipers,
("keg_small_init: keg->uk_ipers too high(%d) increase max_ipers",
keg->uk_ipers));
@ -1324,10 +1322,8 @@ keg_ctor(void *mem, int size, void *udata, int flags)
if (arg->flags & UMA_ZONE_ZINIT)
keg->uk_init = zero_init;
#ifndef __rtems__
if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
keg->uk_flags |= UMA_ZONE_VTOSLAB;
#endif /* __rtems__ */
/*
* The +UMA_FRITM_SZ added to uk_size is to account for the
@ -1641,6 +1637,16 @@ zone_foreach(void (*zfunc)(uma_zone_t))
mtx_unlock(&uma_mtx);
}
#ifdef __rtems__
/*
 * Chunk info constructor for UMA chunk allocations: a freshly allocated
 * chunk has no slab associated with it yet (vsetslab() records one later).
 * The "self" control argument is unused here.
 */
static void
rtems_bsd_uma_chunk_info_ctor(rtems_bsd_chunk_control *self,
    rtems_bsd_chunk_info *info)
{
	/* The generic info is embedded as the first member, so this cast is valid. */
	rtems_bsd_uma_chunk_info *uci = (rtems_bsd_uma_chunk_info *) info;
	uci->slab = NULL;
}
#endif /* __rtems__ */
/* Public functions */
/* See uma.h */
void
@ -1659,6 +1665,11 @@ uma_startup(void *bootmem, int boot_pages)
#ifdef UMA_DEBUG
printf("Creating uma keg headers zone and keg.\n");
#endif
#ifdef __rtems__
rtems_bsd_chunk_init(&rtems_bsd_uma_chunks,
sizeof(rtems_bsd_uma_chunk_info), rtems_bsd_uma_chunk_info_ctor,
rtems_bsd_chunk_info_dtor_default);
#endif /* __rtems__ */
mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
/*
@ -2812,7 +2823,7 @@ zone_free_item(uma_zone_t zone, void *item, void *udata,
#ifndef __rtems__
mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
#else /* __rtems__ */
mem = rtems_page_find(item);
mem = rtems_bsd_chunk_get_begin(&rtems_bsd_uma_chunks, item);
#endif /* __rtems__ */
keg = zone_first_keg(zone); /* Must only be one. */
if (zone->uz_flags & UMA_ZONE_HASH) {
@ -2822,7 +2833,6 @@ zone_free_item(uma_zone_t zone, void *item, void *udata,
slab = (uma_slab_t)mem;
}
} else {
#ifndef __rtems__
/* This prevents redundant lookups via free(). */
if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
slab = (uma_slab_t)udata;
@ -2830,9 +2840,6 @@ zone_free_item(uma_zone_t zone, void *item, void *udata,
slab = vtoslab((vm_offset_t)item);
keg = slab->us_keg;
keg_relock(keg, zone);
#else /* __rtems__ */
panic("uma virtual memory not supported!" );
#endif /* __rtems__ */
}
MPASS(keg == slab->us_keg);
@ -3089,8 +3096,12 @@ uma_find_refcnt(uma_zone_t zone, void *item)
u_int32_t *refcnt;
int idx;
#ifndef __rtems__
slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
(~UMA_SLAB_MASK));
#else /* __rtems__ */
slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item);
#endif /* __rtems__ */
keg = slabref->us_keg;
KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
@ -3149,9 +3160,7 @@ uma_large_malloc(int size, int wait)
return (NULL);
mem = page_alloc(NULL, size, &flags, wait);
if (mem) {
#ifndef __rtems__
vsetslab((vm_offset_t)mem, slab);
#endif /* __rtems__ */
slab->us_data = mem;
slab->us_flags = flags | UMA_SLAB_MALLOC;
slab->us_size = size;

View File

@ -391,6 +391,16 @@ hash_sfind(struct uma_hash *hash, u_int8_t *data)
return (NULL);
}
#ifdef __rtems__
#include <machine/rtems-bsd-chunk.h>
/*
 * Per-chunk information for UMA chunk allocations: extends the generic
 * chunk info with the slab backing the chunk (see vtoslab()/vsetslab()).
 */
typedef struct {
	rtems_bsd_chunk_info chunk_info;	/* must be first: cast from rtems_bsd_chunk_info */
	uma_slab_t slab;	/* slab backing this chunk; NULL until vsetslab() */
} rtems_bsd_uma_chunk_info;
/* Chunk control for all UMA chunk allocations (defined in uma_core.c). */
extern rtems_bsd_chunk_control rtems_bsd_uma_chunks;
#endif /* __rtems__ */
static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
@ -406,21 +416,31 @@ vtoslab(vm_offset_t va)
else
return (NULL);
#else /* __rtems__ */
return (NULL); /* XXX - FIX THIS!!! */
rtems_bsd_uma_chunk_info *uci = (rtems_bsd_uma_chunk_info *)
rtems_bsd_chunk_get_info(&rtems_bsd_uma_chunks, (void *) va);
return uci->slab;
#endif /* __rtems__ */
}
#ifndef __rtems__
/*
 * Records the slab owning the memory at va.  On FreeBSD this is stored in
 * the vm_page backing the address; on RTEMS it is stored in the chunk info
 * of the enclosing chunk (looked up via the red-black tree).
 */
static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
#ifndef __rtems__
	vm_page_t p;
	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	/* The slab pointer is smuggled into the page's object field. */
	p->object = (vm_object_t)slab;
	p->flags |= PG_SLAB;
#else /* __rtems__ */
	rtems_bsd_uma_chunk_info *uci = (rtems_bsd_uma_chunk_info *)
	rtems_bsd_chunk_get_info(&rtems_bsd_uma_chunks, (void *) va);
	uci->slab = slab;
#endif /* __rtems__ */
}
#ifndef __rtems__
static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{

View File

@ -94,6 +94,8 @@ the current Git submodule commit is this
* What to do with the priority parameter present in the FreeBSD synchronization
primitives?
* ZONE(9): Review allocator lock usage in rtems-bsd-chunk.c.
[listing]
----
/* sysinit section? */

View File

@ -0,0 +1,97 @@
/**
* @file
*
* @ingroup rtems_bsd_machine
*
 * @brief Chunk allocator API: fixed size memory areas with attached
 * per-chunk information, used by ZONE(9).
*/
/*
* Copyright (c) 2013 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CHUNK_H_
#define _RTEMS_BSD_MACHINE_RTEMS_BSD_CHUNK_H_
/*
* A chunk is a fixed size memory area with some meta information attached to
* it. This API is used by ZONE(9).
*/
#include <sys/cdefs.h>
#include <rtems/rbtree.h>
__BEGIN_DECLS
/* Per-chunk information; users may embed it as the first member of a larger struct. */
typedef struct rtems_bsd_chunk_info rtems_bsd_chunk_info;
/* Controls one family of chunks sharing the info size and ctor/dtor. */
typedef struct rtems_bsd_chunk_control rtems_bsd_chunk_control;
/* Invoked by rtems_bsd_chunk_alloc() after the chunk info has been set up. */
typedef void (*rtems_bsd_chunk_info_ctor)(rtems_bsd_chunk_control *self,
    rtems_bsd_chunk_info *info);
/* Invoked by rtems_bsd_chunk_free() before the chunk memory is released. */
typedef void (*rtems_bsd_chunk_info_dtor)(rtems_bsd_chunk_control *self,
    rtems_bsd_chunk_info *info);
struct rtems_bsd_chunk_info {
	rtems_rbtree_node node;	/* tree node, ordered by begin address */
	uintptr_t begin;	/* first address of the chunk memory */
	uintptr_t end;		/* one past the last address of the chunk memory */
};
struct rtems_bsd_chunk_control {
	rtems_rbtree_control chunks;	/* red-black tree of all live chunks */
	uintptr_t info_size;	/* size of the per-chunk info header */
	rtems_bsd_chunk_info_ctor info_ctor;
	rtems_bsd_chunk_info_dtor info_dtor;
};
/* Initializes the chunk control; info_size is the per-chunk header size. */
void rtems_bsd_chunk_init(rtems_bsd_chunk_control *self, uintptr_t info_size,
    rtems_bsd_chunk_info_ctor info_ctor, rtems_bsd_chunk_info_dtor info_dtor);
/* Allocates a chunk of chunk_size bytes; returns NULL if allocation fails. */
void *rtems_bsd_chunk_alloc(rtems_bsd_chunk_control *self,
    uintptr_t chunk_size);
/* Frees the chunk containing some_addr_in_chunk; address must be inside a live chunk. */
void rtems_bsd_chunk_free(rtems_bsd_chunk_control *self,
    void *some_addr_in_chunk);
/* Returns the info header of the chunk containing some_addr_in_chunk. */
rtems_bsd_chunk_info *rtems_bsd_chunk_get_info(rtems_bsd_chunk_control *self,
    void *some_addr_in_chunk);
/* Returns the begin address of the chunk containing some_addr_in_chunk. */
void *rtems_bsd_chunk_get_begin(rtems_bsd_chunk_control *self,
    void *some_addr_in_chunk);
/* No-op destructor for chunk users without per-chunk cleanup. */
void rtems_bsd_chunk_info_dtor_default(rtems_bsd_chunk_control *self,
    rtems_bsd_chunk_info *info);
__END_DECLS
#endif /* _RTEMS_BSD_MACHINE_RTEMS_BSD_CHUNK_H_ */

View File

@ -0,0 +1,141 @@
/**
* @file
*
* @ingroup rtems_bsd_rtems
*
 * @brief Chunk allocator implementation based on a red-black tree of
 * allocated chunks.
*/
/*
* Copyright (c) 2013 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/rtems-bsd-config.h>
#include <machine/rtems-bsd-chunk.h>
#include <sys/malloc.h>
#include <rtems/score/apimutex.h>
#define chunk_of_node(n) ((rtems_bsd_chunk_info *) n)
static int
chunk_compare(const rtems_rbtree_node *a, const rtems_rbtree_node *b)
{
const rtems_bsd_chunk_info *left = chunk_of_node(a);
const rtems_bsd_chunk_info *right = chunk_of_node(b);
if (left->begin < right->begin) {
return -1;
} else if (left->begin < right->end) {
return 0;
} else {
return 1;
}
}
/*
 * Initializes a chunk control with an empty chunk tree.  All chunks
 * allocated from it will carry an info header of info_size bytes and run
 * the given ctor/dtor hooks on allocation and free.
 */
void
rtems_bsd_chunk_init(rtems_bsd_chunk_control *self, uintptr_t info_size,
    rtems_bsd_chunk_info_ctor info_ctor, rtems_bsd_chunk_info_dtor info_dtor)
{
	self->info_ctor = info_ctor;
	self->info_dtor = info_dtor;
	self->info_size = info_size;

	/* Unique keys: chunks are disjoint, so duplicates never occur. */
	rtems_rbtree_initialize_empty(&self->chunks, chunk_compare, true);
}
/*
 * Allocates a chunk of chunk_size bytes and registers it in the chunk tree.
 *
 * The per-chunk info header is placed directly in front of the chunk
 * memory.  The header size is rounded up to a multiple of twice the
 * pointer size (a common maximal alignment) so that the returned chunk
 * pointer keeps the alignment provided by malloc(); without this, an
 * info_size that is not maximally aligned would misalign every chunk.
 *
 * Returns the chunk begin address, or NULL if the allocation failed.
 */
void *
rtems_bsd_chunk_alloc(rtems_bsd_chunk_control *self, uintptr_t chunk_size)
{
	/* NOTE(review): 2 * sizeof(void *) as max alignment — confirm per target. */
	uintptr_t align = 2 * sizeof(void *);
	uintptr_t info_size = (self->info_size + align - 1) & ~(align - 1);
	char *p = malloc(chunk_size + info_size, M_TEMP, M_WAITOK);

	if (p != NULL) {
		/* The info header lives at the start of the allocation. */
		rtems_bsd_chunk_info *info = (rtems_bsd_chunk_info *) p;

		p += info_size;
		info->begin = (uintptr_t) p;
		info->end = (uintptr_t) p + chunk_size;
		(*self->info_ctor)(self, info);

		_RTEMS_Lock_allocator();
		rtems_rbtree_insert_unprotected(&self->chunks, &info->node);
		_RTEMS_Unlock_allocator();
	}

	return p;
}
/*
 * Frees the chunk containing some_addr_in_chunk: removes it from the chunk
 * tree, runs the destructor hook, and releases the memory.  The info header
 * is the start of the underlying allocation, so it is what gets freed.
 */
void
rtems_bsd_chunk_free(rtems_bsd_chunk_control *self,
    void *some_addr_in_chunk)
{
	rtems_bsd_chunk_info *info =
	    rtems_bsd_chunk_get_info(self, some_addr_in_chunk);

	_RTEMS_Lock_allocator();
	rtems_rbtree_extract_unprotected(&self->chunks, &info->node);
	_RTEMS_Unlock_allocator();

	(*self->info_dtor)(self, info);
	free(info, M_TEMP);
}
/*
 * Returns the info header of the chunk containing some_addr_in_chunk.
 * A stack-local key with end == 0 is used for the tree lookup; see
 * chunk_compare() for why this matches the containing chunk.
 */
rtems_bsd_chunk_info *
rtems_bsd_chunk_get_info(rtems_bsd_chunk_control *self,
    void *some_addr_in_chunk)
{
	rtems_rbtree_node *n;
	rtems_bsd_chunk_info key = {
		.begin = (uintptr_t) some_addr_in_chunk
	};

	n = rtems_rbtree_find_unprotected(&self->chunks, &key.node);
	return chunk_of_node(n);
}
/*
 * Returns the begin address of the chunk containing some_addr_in_chunk.
 */
void *
rtems_bsd_chunk_get_begin(rtems_bsd_chunk_control *self,
    void *some_addr_in_chunk)
{
	return (void *) rtems_bsd_chunk_get_info(self,
	    some_addr_in_chunk)->begin;
}
/*
 * Default chunk info destructor: intentionally does nothing.  For chunk
 * users whose info needs no cleanup beyond the free itself.
 */
void
rtems_bsd_chunk_info_dtor_default(rtems_bsd_chunk_control *self,
    rtems_bsd_chunk_info *info)
{
	(void) info;
	(void) self;
}

View File

@ -1,116 +0,0 @@
/**
* @file
*
* @ingroup rtems_bsd_rtems
*
 * @brief The FreeBSD uma source used pages for which there was an
 * assumption of page alignment. Enforcing this alignment would waste
 * more memory than we were willing to accept. Therefore, a set of
 * rtems-bsd-page routines tracks the allocation of pages, and the
 * small sections of the uma source were modified to use
 * these methods on an RTEMS system.
*/
/*
* COPYRIGHT (c) 2012. On-Line Applications Research Corporation (OAR).
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/rtems-bsd-config.h>
#include <rtems/bsd/sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <rtems/chain.h>
#define RTEMS_PAGE_COUNT 100
typedef struct {
rtems_chain_node node;
void *page;
void *end;
} rtems_page_t;
// chain of pages that have been allocated
static rtems_chain_control rtems_page_list = RTEMS_CHAIN_INITIALIZER_EMPTY( rtems_page_list );
/*
 * Allocates a round_page()-adjusted block and records it on the global page
 * list so rtems_page_find()/rtems_page_free() can locate it later.
 *
 * NOTE(review): "p" and "page" are static for no apparent reason, which
 * makes the function non-reentrant.  Neither malloc() result is checked for
 * NULL before use even though M_NOWAIT may fail.  The append passes "page"
 * instead of "&page->node" (works only because node is the first member).
 * This file is removed by this commit in favor of rtems-bsd-chunk.c.
 */
void *
rtems_page_alloc(int bytes)
{
	static void *p;
	static rtems_page_t *page;
	bytes = round_page(bytes);
	p = (void *) malloc(bytes, M_TEMP, M_NOWAIT | M_ZERO);
	page = (rtems_page_t *) malloc(sizeof(rtems_page_t), M_TEMP, M_NOWAIT | M_ZERO);
	page->page = p;
	/* Pointer arithmetic on void * is a GCC extension. */
	page->end = p + bytes;
	rtems_chain_append( &rtems_page_list, page );
	return p;
}
/*
 * Linearly scans the global page list for the record whose range covers
 * "address"; returns NULL when no record matches.
 *
 * NOTE(review): "the_page = the_node" assigns incompatible pointer types
 * without a cast (works only because node is the first member), and the
 * "address <= the_page->end" bound is inclusive although end points one
 * past the block.  O(n) per lookup — a motivation for the red-black tree
 * replacement in rtems-bsd-chunk.c.
 */
rtems_page_t *rtems_page_t_find( void *address )
{
	rtems_chain_node *the_node;
	rtems_page_t *the_page = NULL;
	for (the_node = rtems_chain_first( &rtems_page_list );
	    !rtems_chain_is_tail(&rtems_page_list, the_node);
	    the_node = rtems_chain_next(the_node)) {
		the_page = the_node;
		if ((address >= the_page->page) &&
		    (address <= the_page->end))
			return the_page;
	}
	return NULL;
}
/*
 * Returns the begin address of the allocated block containing "address",
 * or NULL when the address belongs to no tracked block.
 */
void *rtems_page_find( void *address )
{
	rtems_page_t *ptr;
	ptr = rtems_page_t_find( address );
	if (ptr)
		return ptr->page;
	return ptr;
}
/*
 * Frees the tracked block containing "address" together with its tracking
 * record.
 *
 * NOTE(review): the NULL check is only a KASSERT; with KASSERT compiled
 * out, an unknown address dereferences a NULL "ptr".
 */
void rtems_page_free( void *address )
{
	rtems_page_t *ptr;
	ptr = rtems_page_t_find( address );
	KASSERT(ptr != NULL, ("Unable to locate page for freed element"));
	free( ptr->page, M_TEMP );
	free( ptr, M_TEMP );
}