Move TIMEOUT(9) and ZONE(9) initialization

Delete most of virtual memory support.
Sebastian Huber
2013-10-18 14:12:17 +02:00
parent a9be4b0481
commit ffcd542fe2
11 changed files with 178 additions and 259 deletions
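
The ZONE(9) side of the move is visible in the uma_core.c diff below: UMA startup is now hooked into the SYSINIT(9) ordering rather than called explicitly during RTEMS initialization. A minimal, self-contained sketch of that registration follows; the wrapper body is taken verbatim from the hunk, while the includes and the local prototype are assumptions about what the surrounding file already provides.

/*
 * Sketch of the SYSINIT(9) hook added by this commit: the wrapper runs
 * at SI_SUB_VM / SI_ORDER_FIRST, so UMA is initialized before any
 * subsystem that allocates from a zone.  NULL/0 are valid arguments
 * here because the RTEMS build no longer uses the boot-time page cache.
 */
#include <sys/param.h>
#include <sys/kernel.h>	/* SYSINIT(), SI_SUB_VM, SI_ORDER_FIRST */

void uma_startup(void *, int);	/* prototype as in the UMA headers */

static void
rtems_bsd_uma_startup(void *unused)
{
	(void) unused;
	uma_startup(NULL, 0);
}
SYSINIT(rtems_bsd_uma_startup, SI_SUB_VM, SI_ORDER_FIRST,
    rtems_bsd_uma_startup, NULL);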

freebsd/sys/vm/uma_core.c

@@ -93,11 +93,9 @@ void rtems_page_free( void *address );
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>
#ifndef __rtems__
#include <machine/vmparam.h>
#include <ddb/ddb.h>
#endif /* __rtems__ */
/*
* This is the zone and keg from which all zones are spawned. The idea is that
@@ -125,10 +123,12 @@ static int uma_align_cache = 64 - 1;
static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
#ifndef __rtems__
/*
* Are we allowed to allocate buckets?
*/
static int bucketdisable = 1;
#endif /* __rtems__ */
/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
@@ -136,6 +136,7 @@ static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
/* This mutex protects the keg list */
static struct mtx uma_mtx;
#ifndef __rtems__
/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
LIST_HEAD_INITIALIZER(uma_boot_pages);
@@ -145,6 +146,7 @@ static struct mtx uma_boot_pages_mtx;
/* Is the VM done starting up? */
static int booted = 0;
#endif /* __rtems__ */
/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
static u_int uma_max_ipers;
@@ -220,7 +222,9 @@ enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
#endif /* __rtems__ */
static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
#ifndef __rtems__
static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
#endif /* __rtems__ */
static void page_free(void *, int, u_int8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
@@ -250,7 +254,9 @@ static void bucket_free(uma_bucket_t);
static void bucket_zone_drain(void);
static int zone_alloc_bucket(uma_zone_t zone, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
#ifndef __rtems__
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
#endif /* __rtems__ */
static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
uma_fini fini, int align, u_int32_t flags);
@@ -285,8 +291,8 @@ bucket_enable(void)
if (cnt.v_free_count < cnt.v_free_min)
bucketdisable = 1;
else
#endif /* __rtems__ */
bucketdisable = 0;
#endif /* __rtems__ */
}
/*
@@ -336,6 +342,7 @@ bucket_alloc(int entries, int bflags)
struct uma_bucket_zone *ubz;
uma_bucket_t bucket;
#ifndef __rtems__
/*
* This is to stop us from allocating per cpu buckets while we're
* running out of vm.boot_pages. Otherwise, we would exhaust the
@@ -344,6 +351,7 @@ bucket_alloc(int entries, int bflags)
*/
if (bucketdisable)
return (NULL);
#endif /* __rtems__ */
ubz = bucket_zone_lookup(entries);
bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
@@ -944,6 +952,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
return (slab);
}
#ifndef __rtems__
/*
* This function is intended to be used early on in place of page_alloc() so
* that we may use the boot time page cache to satisfy allocations before
@@ -997,6 +1006,7 @@ startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
#endif
return keg->uk_allocf(zone, bytes, pflag, wait);
}
#endif /* __rtems__ */
/*
* Allocates a number of pages from the system
@@ -1009,11 +1019,6 @@ startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
* A pointer to the alloced memory or possibly
* NULL if M_NOWAIT is set.
*/
#ifdef __rtems__
#define PAGE_MASK (PAGE_SIZE-1)
#define round_page(x) ((((unsigned long )(x)) + PAGE_MASK) & ~(PAGE_MASK))
#endif
static void *
page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
@@ -1364,10 +1369,14 @@ keg_ctor(void *mem, int size, void *udata, int flags)
keg->uk_allocf = uma_small_alloc;
keg->uk_freef = uma_small_free;
#endif
#ifndef __rtems__
if (booted == 0)
keg->uk_allocf = startup_alloc;
} else if (booted == 0 && (keg->uk_flags & UMA_ZFLAG_INTERNAL))
keg->uk_allocf = startup_alloc;
#else /* __rtems__ */
}
#endif /* __rtems__ */
/*
* Initialize keg's lock (shared among zones).
@@ -1638,10 +1647,14 @@ void
uma_startup(void *bootmem, int boot_pages)
{
struct uma_zctor_args args;
#ifndef __rtems__
uma_slab_t slab;
#endif /* __rtems__ */
u_int slabsize;
u_int objsize, totsize, wsize;
#ifndef __rtems__
int i;
#endif /* __rtems__ */
#ifdef UMA_DEBUG
printf("Creating uma keg headers zone and keg.\n");
@@ -1732,6 +1745,7 @@ uma_startup(void *bootmem, int boot_pages)
/* The initial zone has no Per cpu queues so it's smaller */
zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
#ifndef __rtems__
#ifdef UMA_DEBUG
printf("Filling boot free list.\n");
#endif
@@ -1742,6 +1756,7 @@ uma_startup(void *bootmem, int boot_pages)
LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
}
mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
#endif /* __rtems__ */
#ifdef UMA_DEBUG
printf("Creating uma zone headers zone and keg.\n");
@@ -1806,19 +1821,31 @@ uma_startup(void *bootmem, int boot_pages)
printf("UMA startup complete.\n");
#endif
}
#ifdef __rtems__
static void
rtems_bsd_uma_startup(void *unused)
{
(void) unused;
uma_startup(NULL, 0);
}
SYSINIT(rtems_bsd_uma_startup, SI_SUB_VM, SI_ORDER_FIRST,
rtems_bsd_uma_startup, NULL);
#endif /* __rtems__ */
#ifndef __rtems__
/* see uma.h */
void
uma_startup2(void)
{
booted = 1;
#ifndef __rtems__
bucket_enable();
#endif /* __rtems__ */
#ifdef UMA_DEBUG
printf("UMA startup2 complete.\n");
#endif
}
#endif /* __rtems__ */
/*
* Initialize our callout handle
@@ -1907,6 +1934,7 @@ uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
return (zone_alloc_item(zones, &args, M_WAITOK));
}
#ifndef __rtems__
static void
zone_lock_pair(uma_zone_t a, uma_zone_t b)
{
@@ -1927,7 +1955,6 @@ zone_unlock_pair(uma_zone_t a, uma_zone_t b)
ZONE_UNLOCK(b);
}
#ifndef __rtems__
int
uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
{
@@ -2284,6 +2311,7 @@ zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
return (NULL);
}
#ifndef __rtems__
/*
* uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
* with the keg locked. Caller must call zone_relock() afterwards if the
@@ -2360,6 +2388,7 @@ zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
}
return (NULL);
}
#endif /* __rtems__ */
static void *
slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
@@ -3078,9 +3107,7 @@ uma_reclaim(void)
#ifdef UMA_DEBUG
printf("UMA: vm asked us to release pages!\n");
#endif
#ifndef __rtems__
bucket_enable();
#endif /* __rtems__ */
zone_foreach(zone_drain);
/*
* Some slabs may have been freed but this zone will be visited early

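Because the diff view above strips the +/- markers, the net effect on bucket_enable() is easier to see consolidated. Reconstructed from the @@ -285,8 +291,8 @@ hunk, and assuming the opening #ifndef __rtems__ sits just above the shown context (the bucketdisable flag itself is now FreeBSD-only, per the @@ -125,10 +123,12 @@ hunk), the function after this commit reads:

static void
bucket_enable(void)
{
#ifndef __rtems__
	/*
	 * Throttle bucket allocation only while the VM is short on free
	 * pages.  RTEMS has no page counters, so the whole body compiles
	 * away and per-CPU buckets stay permanently enabled.
	 */
	if (cnt.v_free_count < cnt.v_free_min)
		bucketdisable = 1;
	else
		bucketdisable = 0;
#endif /* __rtems__ */
}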
freebsd/sys/vm/vm_extern.h (new file, 110 lines)

@@ -0,0 +1,110 @@
/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
* $FreeBSD$
*/
#ifndef _VM_EXTERN_H_
#define _VM_EXTERN_H_
struct proc;
struct vmspace;
struct vnode;
#ifdef _KERNEL
int kernacc(void *, int, int);
vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
vm_offset_t kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
unsigned long boundary, vm_memattr_t memattr);
vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
vm_offset_t kmem_alloc_nofault_space(vm_map_t, vm_size_t, int);
vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
void kmem_init(vm_offset_t, vm_offset_t);
vm_offset_t kmem_malloc(vm_map_t map, vm_size_t size, int flags);
int kmem_back(vm_map_t, vm_offset_t, vm_size_t, int);
vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
boolean_t);
void swapout_procs(int);
int useracc(void *, int, int);
int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
vm_ooffset_t *);
void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
void vm_set_page_size(void);
void vm_sync_icache(vm_map_t, vm_offset_t, vm_size_t);
struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *);
int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
int vmspace_unshare(struct proc *);
void vmspace_exit(struct thread *);
struct vmspace *vmspace_acquire_ref(struct proc *);
void vmspace_free(struct vmspace *);
void vmspace_exitfree(struct proc *);
void vnode_pager_setsize(struct vnode *, vm_ooffset_t);
#ifndef __rtems__
int vslock(void *, size_t);
void vsunlock(void *, size_t);
#else /* __rtems__ */
static inline int
vslock(void *addr, size_t len)
{
(void) addr;
(void) len;
return (0);
}
static inline void
vsunlock(void *addr, size_t len)
{
(void) addr;
(void) len;
}
#endif /* __rtems__ */
void vm_object_print(/* db_expr_t */ long, boolean_t, /* db_expr_t */ long,
char *);
int vm_fault_quick(caddr_t v, int prot);
struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
void vm_imgact_unmap_page(struct sf_buf *sf);
void vm_thread_dispose(struct thread *td);
int vm_thread_new(struct thread *td, int pages);
void vm_thread_swapin(struct thread *td);
void vm_thread_swapout(struct thread *td);
#endif /* _KERNEL */
#endif /* !_VM_EXTERN_H_ */
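
For context on the RTEMS vslock()/vsunlock() stubs above: FreeBSD callers use the pair to keep a buffer resident across an operation, so a stub that reports success and does nothing preserves that contract on a system without paging. The caller below is hypothetical (process_locked() is illustrative, not from the tree), and the local prototypes merely stand in for the static inlines the header defines.

#include <sys/types.h>

int vslock(void *, size_t);	/* stands in for the inline stubs above */
void vsunlock(void *, size_t);

static int
process_locked(void *buf, size_t len)
{
	int error;

	/*
	 * On FreeBSD this wires the pages backing buf; on RTEMS the
	 * inline stub returns 0 immediately, which is correct because
	 * memory is never paged out.
	 */
	error = vslock(buf, len);
	if (error != 0)
		return (error);
	/* ... operate on buf while it is guaranteed resident ... */
	vsunlock(buf, len);
	return (0);
}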