diff --git a/Makefile b/Makefile
index 79ad109e..9f4c4fff 100644
--- a/Makefile
+++ b/Makefile
@@ -87,7 +87,6 @@ LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-sysctlnametomib.c
 LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-taskqueue.c
 LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-thread.c
 LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-timesupport.c
-LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-vm_glue.c
 LIB_C_FILES += rtemsbsd/rtems/rtems-kvm.c
 LIB_C_FILES += rtemsbsd/rtems/rtems-net-setup.c
 LIB_C_FILES += rtemsbsd/rtems/rtems-syslog-initialize.c
diff --git a/freebsd-to-rtems.py b/freebsd-to-rtems.py
index 0d0669fa..1fa64d31 100755
--- a/freebsd-to-rtems.py
+++ b/freebsd-to-rtems.py
@@ -644,7 +644,6 @@ rtems.addRTEMSSourceFiles(
 	'rtems/rtems-bsd-taskqueue.c',
 	'rtems/rtems-bsd-thread.c',
 	'rtems/rtems-bsd-timesupport.c',
-	'rtems/rtems-bsd-vm_glue.c',
 	'rtems/rtems-kvm.c',
 	'rtems/rtems-net-setup.c',
 	'rtems/rtems-syslog-initialize.c',
@@ -778,6 +777,7 @@ base.addHeaderFiles(
 	'sys/vm/uma_dbg.h',
 	'sys/vm/uma.h',
 	'sys/vm/uma_int.h',
+	'sys/vm/vm_extern.h',
 	'sys/vm/vm.h',
 	]
)
diff --git a/freebsd/sys/kern/kern_sysctl.c b/freebsd/sys/kern/kern_sysctl.c
index de597bff..a1270a5c 100644
--- a/freebsd/sys/kern/kern_sysctl.c
+++ b/freebsd/sys/kern/kern_sysctl.c
@@ -67,10 +67,8 @@ __FBSDID("$FreeBSD$");
 #include
 #include
-#ifndef __rtems__
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
-#endif
 
 #ifdef __rtems__
 /* From FreeBSD file 'sys/kern/kern_mib.c' */
@@ -1207,10 +1205,8 @@ kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
 	error = sysctl_root(0, name, namelen, &req);
 
 	SYSCTL_SUNLOCK();
-#ifndef __rtems__
 	if (req.lock == REQ_WIRED && req.validlen > 0)
 		vsunlock(req.oldptr, req.validlen);
-#endif /* __rtems__ */
 
 	if (error && error != ENOMEM)
 		return (error);
diff --git a/freebsd/sys/kern/kern_timeout.c b/freebsd/sys/kern/kern_timeout.c
index 67924d08..1d5dbadf 100644
--- a/freebsd/sys/kern/kern_timeout.c
+++ b/freebsd/sys/kern/kern_timeout.c
@@ -157,10 +157,27 @@ MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
  * This code is called very early in the kernel initialization sequence,
  * and may be called more then once.
  */
+#ifdef __rtems__
+static void rtems_bsd_timeout_init(void *);
+
+static void callout_cpu_init(struct callout_cpu *);
+
+SYSINIT(rtems_bsd_timeout, SI_SUB_VM, SI_ORDER_FIRST, rtems_bsd_timeout_init,
+    NULL);
+
+static void
+rtems_bsd_timeout_init(void *unused)
+#else /* __rtems__ */
 caddr_t
 kern_timeout_callwheel_alloc(caddr_t v)
+#endif /* __rtems__ */
 {
 	struct callout_cpu *cc;
+#ifdef __rtems__
+	caddr_t v;
+
+	(void) unused;
+#endif /* __rtems__ */
 
 	timeout_cpu = PCPU_GET(cpuid);
 	cc = CC_CPU(timeout_cpu);
@@ -173,11 +190,19 @@ kern_timeout_callwheel_alloc(caddr_t v)
 		;
 	callwheelmask = callwheelsize - 1;
 
+#ifdef __rtems__
+	v = malloc(ncallout * sizeof(*cc->cc_callout) + callwheelsize
+	    * sizeof(*cc->cc_callwheel), M_CALLOUT, M_ZERO | M_WAITOK);
+#endif /* __rtems__ */
 	cc->cc_callout = (struct callout *)v;
 	v = (caddr_t)(cc->cc_callout + ncallout);
 	cc->cc_callwheel = (struct callout_tailq *)v;
 	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
+#ifndef __rtems__
 	return(v);
+#else /* __rtems__ */
+	callout_cpu_init(cc);
+#endif /* __rtems__ */
 }
 
 static void
@@ -201,6 +226,7 @@ callout_cpu_init(struct callout_cpu *cc)
 	}
 }
 
+#ifndef __rtems__
 /*
  * kern_timeout_callwheel_init() - initialize previously reserved callwheel
  * space.
@@ -213,6 +239,7 @@ kern_timeout_callwheel_init(void)
 {
 	callout_cpu_init(CC_CPU(timeout_cpu));
 }
+#endif /* __rtems__ */
 
 /*
  * Start standard softclock thread.
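The kern_timeout.c hunks above replace FreeBSD's two-pass callwheel bootstrap — kern_timeout_callwheel_alloc() to size and place the wheel, then kern_timeout_callwheel_init() — with a single rtems_bsd_timeout_init() that allocates the wheel via malloc(9) and initializes it in one step, registered through SYSINIT so it runs during BSD library startup. For reference, a minimal sketch of that SYSINIT registration pattern in the same SI_SUB_VM/SI_ORDER_FIRST slot; the names example_mod and example_mod_init are illustrative, not part of the patch:

#include <sys/param.h>
#include <sys/kernel.h>

static void example_mod_init(void *);

/*
 * SYSINIT(uniquifier, subsystem, order, func, ident) registers func to
 * run once during initialization, ordered first by subsystem and then
 * by order within the subsystem.
 */
SYSINIT(example_mod, SI_SUB_VM, SI_ORDER_FIRST, example_mod_init, NULL);

static void
example_mod_init(void *unused)
{
	(void) unused;

	/* Allocate and initialize module state here; nothing else has to
	   call this function by hand. */
}

The same mechanism is used below to run uma_startup() automatically, which is what lets the testsuite drop its manual timeout_table_init() call at the end of this patch.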
diff --git a/freebsd/sys/vm/uma_core.c b/freebsd/sys/vm/uma_core.c
index 8d62d559..f4a906df 100644
--- a/freebsd/sys/vm/uma_core.c
+++ b/freebsd/sys/vm/uma_core.c
@@ -93,11 +93,9 @@ void rtems_page_free( void *address );
 
 #include
 #include
-#ifndef __rtems__
 #include
 #include
-#endif /* __rtems__ */
 
 /*
  * This is the zone and keg from which all zones are spawned.  The idea is that
@@ -125,10 +123,12 @@ static int uma_align_cache = 64 - 1;
 
 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
 
+#ifndef __rtems__
 /*
  * Are we allowed to allocate buckets?
 */
 static int bucketdisable = 1;
+#endif /* __rtems__ */
 
 /* Linked list of all kegs in the system */
 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
@@ -136,6 +136,7 @@ static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
 /* This mutex protects the keg list */
 static struct mtx uma_mtx;
 
+#ifndef __rtems__
 /* Linked list of boot time pages */
 static LIST_HEAD(,uma_slab) uma_boot_pages =
     LIST_HEAD_INITIALIZER(uma_boot_pages);
@@ -145,6 +146,7 @@ static struct mtx uma_boot_pages_mtx;
 
 /* Is the VM done starting up? */
 static int booted = 0;
+#endif /* __rtems__ */
 
 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
 static u_int uma_max_ipers;
@@ -220,7 +222,9 @@ enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
 #endif /* __rtems__ */
 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
+#ifndef __rtems__
 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
+#endif /* __rtems__ */
 static void page_free(void *, int, u_int8_t);
 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
 static void cache_drain(uma_zone_t);
@@ -250,7 +254,9 @@ static void bucket_free(uma_bucket_t);
 static void bucket_zone_drain(void);
 static int zone_alloc_bucket(uma_zone_t zone, int flags);
 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
+#ifndef __rtems__
 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
+#endif /* __rtems__ */
 static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
     uma_fini fini, int align, u_int32_t flags);
@@ -285,8 +291,8 @@ bucket_enable(void)
 	if (cnt.v_free_count < cnt.v_free_min)
 		bucketdisable = 1;
 	else
-#endif /* __rtems__ */
 		bucketdisable = 0;
+#endif /* __rtems__ */
 }
 
 /*
@@ -336,6 +342,7 @@ bucket_alloc(int entries, int bflags)
 	struct uma_bucket_zone *ubz;
 	uma_bucket_t bucket;
 
+#ifndef __rtems__
 	/*
 	 * This is to stop us from allocating per cpu buckets while we're
 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
@@ -344,6 +351,7 @@ bucket_alloc(int entries, int bflags)
 	 */
 	if (bucketdisable)
 		return (NULL);
+#endif /* __rtems__ */
 
 	ubz = bucket_zone_lookup(entries);
 	bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
@@ -944,6 +952,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
 	return (slab);
 }
 
+#ifndef __rtems__
 /*
  * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
@@ -997,6 +1006,7 @@ startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 #endif
 	return keg->uk_allocf(zone, bytes, pflag, wait);
 }
+#endif /* __rtems__ */
 
 /*
  * Allocates a number of pages from the system
@@ -1009,11 +1019,6 @@ startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
  *	A pointer to the alloced memory or possibly
  *	NULL if M_NOWAIT is set.
  */
-#ifdef __rtems__
-#define PAGE_MASK (PAGE_SIZE-1)
-
-#define round_page(x) ((((unsigned long )(x)) + PAGE_MASK) & ~(PAGE_MASK))
-#endif
 static void *
 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 {
@@ -1364,10 +1369,14 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 		keg->uk_allocf = uma_small_alloc;
 		keg->uk_freef = uma_small_free;
 #endif
+#ifndef __rtems__
 		if (booted == 0)
 			keg->uk_allocf = startup_alloc;
 	} else if (booted == 0 && (keg->uk_flags & UMA_ZFLAG_INTERNAL))
 		keg->uk_allocf = startup_alloc;
+#else /* __rtems__ */
+	}
+#endif /* __rtems__ */
 
 	/*
 	 * Initialize keg's lock (shared among zones).
@@ -1638,10 +1647,14 @@ void
 uma_startup(void *bootmem, int boot_pages)
 {
 	struct uma_zctor_args args;
+#ifndef __rtems__
 	uma_slab_t slab;
+#endif /* __rtems__ */
 	u_int slabsize;
 	u_int objsize, totsize, wsize;
+#ifndef __rtems__
 	int i;
+#endif /* __rtems__ */
 
 #ifdef UMA_DEBUG
 	printf("Creating uma keg headers zone and keg.\n");
@@ -1732,6 +1745,7 @@ uma_startup(void *bootmem, int boot_pages)
 	/* The initial zone has no Per cpu queues so it's smaller */
 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
 
+#ifndef __rtems__
 #ifdef UMA_DEBUG
 	printf("Filling boot free list.\n");
 #endif
@@ -1742,6 +1756,7 @@ uma_startup(void *bootmem, int boot_pages)
 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
 	}
 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
+#endif /* __rtems__ */
 #ifdef UMA_DEBUG
 	printf("Creating uma zone headers zone and keg.\n");
 #endif
@@ -1806,19 +1821,31 @@ uma_startup(void *bootmem, int boot_pages)
 	printf("UMA startup complete.\n");
 #endif
 }
+#ifdef __rtems__
+static void
+rtems_bsd_uma_startup(void *unused)
+{
+	(void) unused;
+	uma_startup(NULL, 0);
+}
+
+SYSINIT(rtems_bsd_uma_startup, SI_SUB_VM, SI_ORDER_FIRST,
+    rtems_bsd_uma_startup, NULL);
+#endif /* __rtems__ */
+
+#ifndef __rtems__
 /* see uma.h */
 void
 uma_startup2(void)
 {
 	booted = 1;
-#ifndef __rtems__
 	bucket_enable();
-#endif /* __rtems__ */
 #ifdef UMA_DEBUG
 	printf("UMA startup2 complete.\n");
 #endif
 }
+#endif /* __rtems__ */
 
 /*
  * Initialize our callout handle
@@ -1907,6 +1934,7 @@ uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
 	return (zone_alloc_item(zones, &args, M_WAITOK));
 }
 
+#ifndef __rtems__
 static void
 zone_lock_pair(uma_zone_t a, uma_zone_t b)
 {
@@ -1927,7 +1955,6 @@ zone_unlock_pair(uma_zone_t a, uma_zone_t b)
 	ZONE_UNLOCK(b);
 }
 
-#ifndef __rtems__
 int
 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
 {
@@ -2284,6 +2311,7 @@ zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
 	return (NULL);
 }
 
+#ifndef __rtems__
 /*
 * uma_zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
 * with the keg locked.  Caller must call zone_relock() afterwards if the
@@ -2360,6 +2388,7 @@ zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
 	}
 	return (NULL);
 }
+#endif /* __rtems__ */
 
 static void *
 slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
@@ -3078,9 +3107,7 @@ uma_reclaim(void)
 #ifdef UMA_DEBUG
 	printf("UMA: vm asked us to release pages!\n");
 #endif
-#ifndef __rtems__
 	bucket_enable();
-#endif /* __rtems__ */
 	zone_foreach(zone_drain);
 	/*
 	 * Some slabs may have been freed but this zone will be visited early
diff --git a/freebsd/sys/vm/vm_extern.h b/freebsd/sys/vm/vm_extern.h
new file mode 100644
index 00000000..76f53802
--- /dev/null
+++ b/freebsd/sys/vm/vm_extern.h
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)vm_extern.h	8.2 (Berkeley) 1/12/94
+ * $FreeBSD$
+ */
+
+#ifndef _VM_EXTERN_H_
+#define	_VM_EXTERN_H_
+
+struct proc;
+struct vmspace;
+struct vnode;
+
+#ifdef _KERNEL
+
+int kernacc(void *, int, int);
+vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
+vm_offset_t kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags,
+    vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
+vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
+    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
+    unsigned long boundary, vm_memattr_t memattr);
+vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
+vm_offset_t kmem_alloc_nofault_space(vm_map_t, vm_size_t, int);
+vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
+void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
+void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
+void kmem_init(vm_offset_t, vm_offset_t);
+vm_offset_t kmem_malloc(vm_map_t map, vm_size_t size, int flags);
+int kmem_back(vm_map_t, vm_offset_t, vm_size_t, int);
+vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
+    boolean_t);
+void swapout_procs(int);
+int useracc(void *, int, int);
+int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
+void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
+    vm_ooffset_t *);
+void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
+int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
+int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
+void vm_waitproc(struct proc *);
+int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
+void vm_set_page_size(void);
+void vm_sync_icache(vm_map_t, vm_offset_t, vm_size_t);
+struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
+struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *);
+int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
+int vmspace_unshare(struct proc *);
+void vmspace_exit(struct thread *);
+struct vmspace *vmspace_acquire_ref(struct proc *);
+void vmspace_free(struct vmspace *);
+void vmspace_exitfree(struct proc *);
+void vnode_pager_setsize(struct vnode *, vm_ooffset_t);
+#ifndef __rtems__
+int vslock(void *, size_t);
+void vsunlock(void *, size_t);
+#else /* __rtems__ */
+static inline int
+vslock(void *addr, size_t len)
+{
+	(void) addr;
+	(void) len;
+
+	return (0);
+}
+
+static inline void
+vsunlock(void *addr, size_t len)
+{
+	(void) addr;
+	(void) len;
+}
+#endif /* __rtems__ */
+void vm_object_print(/* db_expr_t */ long, boolean_t, /* db_expr_t */ long,
+    char *);
+int vm_fault_quick(caddr_t v, int prot);
+struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
+void vm_imgact_unmap_page(struct sf_buf *sf);
+void vm_thread_dispose(struct thread *td);
+int vm_thread_new(struct thread *td, int pages);
+void vm_thread_swapin(struct thread *td);
+void vm_thread_swapout(struct thread *td);
+#endif /* _KERNEL */
+#endif /* !_VM_EXTERN_H_ */
diff --git a/rtemsbsd/include/vm/vm_extern.h b/rtemsbsd/include/machine/vmparam.h
similarity index 100%
rename from rtemsbsd/include/vm/vm_extern.h
rename to rtemsbsd/include/machine/vmparam.h
diff --git a/rtemsbsd/rtems/rtems-bsd-vm_glue.c b/rtemsbsd/rtems/rtems-bsd-vm_glue.c
deleted file mode 100644
index 9bee98e5..00000000
--- a/rtemsbsd/rtems/rtems-bsd-vm_glue.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/**
- * @file
- *
- * @ingroup rtems_bsd_rtems
- *
- * @brief TODO.
- */
-
-/*
- * COPYRIGHT (c) 2012.
- * On-Line Applications Research Corporation (OAR).
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include
-
-#include
-#include
-#include
-#include
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-#include
-#include
-#include
-#include
-
-/*
- * System initialization
- */
-static int boot_pages = UMA_BOOT_PAGES;
-static void vm_mem_init(void *);
-SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL);
-
-
-static void
-vm_mem_init(dummy)
-	void *dummy;
-{
-	void *mapped;
-	static void *callwheel_array[270];
-	caddr_t c;
-	caddr_t p;
-
-	/*
-	 * The values for mapped came from the freeBSD method
-	 * vm_page_startup() in the freeBSD file vm_page.c.
-	 * XXX - This may need to be adjusted for our system.
-	 */
-	mapped = calloc( boot_pages * UMA_SLAB_SIZE, 1 );
-	uma_startup((void *)mapped, boot_pages);
-
-	/*
-	 * The following is doing a minimal amount of work from
-	 * the method vm_ksubmap_init() in freeBSD vm_init.c.
-	 */
-	c = (caddr_t) callwheel_array;
-	p = kern_timeout_callwheel_alloc(c);
-	printf( "*** callwheel 0x%x 0x%x 0x%x\n", c, p, (c + sizeof(callwheel_array)) );
-	if ( p > (c + sizeof(callwheel_array)) )
-		panic( "*** not enough memory for callwheel_array ***" );
-	kern_timeout_callwheel_init();
-	uma_startup2();
-}
-
-/*
- * MPSAFE
- *
- * WARNING!  This code calls vm_map_check_protection() which only checks
- * the associated vm_map_entry range.  It does not determine whether the
- * contents of the memory is actually readable or writable.  In most cases
- * just checking the vm_map_entry is sufficient within the kernel's address
- * space.
- */
-int
-kernacc(addr, len, rw)
-	void *addr;
-	int len, rw;
-{
-	return 1;
-}
-
-/*
- * MPSAFE
- *
- * WARNING!  This code calls vm_map_check_protection() which only checks
- * the associated vm_map_entry range.  It does not determine whether the
- * contents of the memory is actually readable or writable.  vmapbuf(),
- * vm_fault_quick(), or copyin()/copout()/su*()/fu*() functions should be
- * used in conjuction with this call.
- */
-int
-useracc(addr, len, rw)
-	void *addr;
-	int len, rw;
-{
-	return 1;
-}
-
-int
-vslock(void *addr, size_t len)
-{
-	return 0;
-}
-
-void
-vsunlock(void *addr, size_t len)
-{
-}
-
-/*
- * Destroy the given CPU private mapping and unpin the page that it mapped.
- */
-void
-vm_imgact_unmap_page(struct sf_buf *sf)
-{
-}
-
-
-/*
- * Create the kernel stack (including pcb for i386) for a new thread.
- * This routine directly affects the fork perf for a process and
- * create performance for a thread.
- */
-int
-vm_thread_new(struct thread *td, int pages)
-{
-	return (1);
-}
-
-/*
- * Dispose of a thread's kernel stack.
- */
-void
-vm_thread_dispose(struct thread *td)
-{
-}
-
-/*
- * Allow a thread's kernel stack to be paged out.
- */
-void
-vm_thread_swapout(struct thread *td)
-{
-}
-
-/*
- * Bring the kernel stack for a specified thread back in.
- */
-void
-vm_thread_swapin(struct thread *td)
-{
-}
-
-/*
- * Implement fork's actions on an address space.
- * Here we arrange for the address space to be copied or referenced,
- * allocate a user struct (pcb and kernel stack), then call the
- * machine-dependent layer to fill those in and make the new process
- * ready to run.  The new process is set up so that it returns directly
- * to user mode to avoid stack copying and relocation problems.
- */
-int
-vm_forkproc(td, p2, td2, vm2, flags)
-	struct thread *td;
-	struct proc *p2;
-	struct thread *td2;
-	struct vmspace *vm2;
-	int flags;
-{
-}
-
-/*
- * Called after process has been wait(2)'ed apon and is being reaped.
- * The idea is to reclaim resources that we could not reclaim while
- * the process was still executing.
- */
-void
-vm_waitproc(p)
-	struct proc *p;
-{
-}
-
-void
-faultin(p)
-	struct proc *p;
-{
-}
-
-void
-kick_proc0(void)
-{
-}
diff --git a/testsuite/timeout01/init.c b/testsuite/timeout01/init.c
index 4afddc70..f74a1b80 100644
--- a/testsuite/timeout01/init.c
+++ b/testsuite/timeout01/init.c
@@ -49,7 +49,6 @@ static void Init(rtems_task_argument arg)
 	sc = rtems_bsd_initialize();
 	assert(sc == RTEMS_SUCCESSFUL);
 
-	timeout_table_init();
 	callout_tick_task_init();
 
 	timeout_test();
diff --git a/testsuite/timeout01/timeout_helper.c b/testsuite/timeout01/timeout_helper.c
index 060fef0b..343651df 100644
--- a/testsuite/timeout01/timeout_helper.c
+++ b/testsuite/timeout01/timeout_helper.c
@@ -39,30 +39,6 @@
 
 #include "timeout_helper.h"
 
-void timeout_table_init()
-{
-	size_t size = 0;
-	caddr_t v = 0;
-	void* firstaddr = 0;
-
-	/* calculates how much memory is needed */
-	v = kern_timeout_callwheel_alloc(v);
-
-	/* allocate memory */
-	size = (size_t)v;
-	firstaddr = malloc(round_page(size));
-	assert(firstaddr != NULL);
-
-	/* now set correct addresses for callwheel */
-	v = (caddr_t) firstaddr;
-	v = kern_timeout_callwheel_alloc(v);
-
-	assert((size_t)((void *)v - firstaddr) == size);
-
-	/* Initialize the callouts we just allocated. */
-	kern_timeout_callwheel_init();
-}
-
 #define CALLOUT_TICK_TASK_PRIO (PRIORITY_DEFAULT_MAXIMUM - 1)
 #define CALLOUT_TICK_TASK_STACK_SIZE (1024)
 #define CALLOUT_TICK_TASK_NAME rtems_build_name('C', 'O', 'U', 'T')
diff --git a/testsuite/timeout01/timeout_helper.h b/testsuite/timeout01/timeout_helper.h
index 7e5acce1..050c592e 100644
--- a/testsuite/timeout01/timeout_helper.h
+++ b/testsuite/timeout01/timeout_helper.h
@@ -36,7 +36,6 @@ extern "C" {
 #endif /* __cplusplus */
 
-void timeout_table_init(void);
 void callout_tick_task_init(void);
 
 #ifdef __cplusplus
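With both SYSINITs in place, UMA and the callout wheel come up inside rtems_bsd_initialize(), which is why the testsuite hunks above can delete timeout_table_init() outright. A sketch of the resulting timeout01 initialization flow, assuming the helpers declared in timeout_helper.h; the <rtems/bsd/bsd.h> header name is an assumption here, not something this patch shows:

#include <assert.h>

#include <rtems.h>
#include <rtems/bsd/bsd.h>	/* assumed home of rtems_bsd_initialize() */

#include "timeout_helper.h"

static void
Init(rtems_task_argument arg)
{
	rtems_status_code sc;

	(void) arg;

	/*
	 * rtems_bsd_initialize() runs the registered SYSINITs, including
	 * rtems_bsd_uma_startup() and rtems_bsd_timeout_init(), so the
	 * callout wheel is ready as soon as it returns.
	 */
	sc = rtems_bsd_initialize();
	assert(sc == RTEMS_SUCCESSFUL);

	callout_tick_task_init();
	timeout_test();	/* test body, defined elsewhere in init.c */
}

The design choice follows from the target environment: RTEMS has no FreeBSD VM bootstrap, so the boot-page cache, startup_alloc(), and the two-pass callwheel sizing serve no purpose there, and a plain malloc()-backed one-shot SYSINIT is the simpler equivalent.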