nfsclient: Import from RTEMS

RTEMS Git commit 251c94d3d3d27e0039f01b718e5c2eb06f39fdf7.
Author: Sebastian Huber
Date:   2016-06-09 11:31:27 +02:00
Parent: a037da67c6
Commit: 4464594567
13 changed files with 8900 additions and 0 deletions


@@ -0,0 +1,234 @@
/**
* @file
*
* @brief Public Interface to the NFS Client Library for RTEMS
*
* @ingroup rtems-nfsclient
*/
/*
* Author: Till Straumann <strauman@slac.stanford.edu> 2002-2003
*
* Authorship
* ----------
* This software (NFS-2 client implementation for RTEMS) was created by
* Till Straumann <strauman@slac.stanford.edu>, 2002-2007,
* Stanford Linear Accelerator Center, Stanford University.
*
* Acknowledgement of sponsorship
* ------------------------------
* The NFS-2 client implementation for RTEMS was produced by
* the Stanford Linear Accelerator Center, Stanford University,
* under Contract DE-AC03-76SFO0515 with the Department of Energy.
*
* Government disclaimer of liability
* ----------------------------------
* Neither the United States nor the United States Department of Energy,
* nor any of their employees, makes any warranty, express or implied, or
* assumes any legal liability or responsibility for the accuracy,
* completeness, or usefulness of any data, apparatus, product, or process
* disclosed, or represents that its use would not infringe privately owned
* rights.
*
* Stanford disclaimer of liability
* --------------------------------
* Stanford University makes no representations or warranties, express or
* implied, nor assumes any liability for the use of this software.
*
* Stanford disclaimer of copyright
* --------------------------------
* Stanford University, owner of the copyright, hereby disclaims its
* copyright and all other rights in this software. Hence, anyone may
* freely use it for any purpose without restriction.
*
* Maintenance of notices
* ----------------------
* In the interest of clarity regarding the origin and status of this
* SLAC software, this and all the preceding Stanford University notices
* are to remain affixed to any copy or derivative of this software made
* or distributed by the recipient and are to be affixed to any copy of
* software made or distributed by the recipient that contains a copy or
* derivative of this software.
*
* ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
*/
#ifndef LIB_RTEMS_NFS_CLIENT_H
#define LIB_RTEMS_NFS_CLIENT_H
/**
* @defgroup rtems-nfsclient NFS Client Library
*
* @ingroup nfsclient
* @{
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <rtems.h>
#include <rtems/libio.h>
#include <rtems/libio_.h>
#include <rtems/seterr.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/stat.h>
#include <dirent.h>
#include <netdb.h>
#include <ctype.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#ifdef __cplusplus
extern "C" {
#endif
/** RPCIO driver interface.
* If you need RPCIO for purposes other than NFS
* you may want to include <rpcio.h>
#include "rpcio.h"
*/
/** Priority of daemon; may be set up prior to calling rpcUdpInit();
* otherwise the network task priority from the rtems_bsdnet_config
* is used...
*/
extern rtems_task_priority rpciodPriority;
#ifdef RTEMS_SMP
/** CPU affinity of daemon; may be set up prior to calling rpcUdpInit();
* otherwise the network task CPU affinity from the rtems_bsdnet_config
* is used...
*/
extern const cpu_set_t *rpciodCpuset;
extern size_t rpciodCpusetSize;
#endif
/**
* @brief Sets the XIDs of the RPC transaction hash table.
*
* The active RPC transactions are stored in a hash table. Each table entry
* contains the XID of its corresponding transaction. The XID consists of two
* parts. The lower part is determined by the hash table index. The upper
* part is incremented in each send operation.
*
* This function sets the upper part of the XID in all hash table entries.
* This can be used to ensure that the XIDs are not reused in a short interval
* for example during a boot process or after resets.
*
* @param[in] xid The upper part is used to set the upper XID part of the hash
* table entries.
*/
void
rpcSetXIDs(uint32_t xid);
/** Initialize the driver.
*
* Note: this is called by the NFS filesystem initialization handler when mount() is called.
*
* @retval 0 on success, -1 on failure
*/
int
rpcUdpInit(void);
/**
* @brief RPC cleanup and stop.
*
* @retval 0 on success, nonzero if still in use
*/
int
rpcUdpCleanup(void);
/** NFS driver interface */
/**
* @brief Initialize the NFS driver.
*
* The RPCIO driver must have been initialized prior to calling this.
*
* Note: this is called by the NFS filesystem initialization handler (with default pool depths) when mount() is called.
*
* ARGS: depth of the small and big transaction pools, i.e. how many
* transactions (buffers) should always be kept around.  (If more
* transactions are needed, they are created and destroyed on the fly.)
*
* Supply zero values to have the driver choose reasonable defaults.
*
* @retval 0 Successful operation.
* @retval -1 An error occurred. The errno is set to indicate the error.
*/
int
nfsInit(int smallPoolDepth, int bigPoolDepth);
/**
* @brief Driver cleanup code.
*
* @retval 0 on success, nonzero if still in use
*/
int
nfsCleanup(void);
/**
* @brief Dump a list of the currently mounted NFS to a file.
*
* Dump a list of the currently mounted NFS to a file
* (stdout is used in case f==NULL)
*/
int
nfsMountsShow(FILE *f);
/**
* @brief Filesystem mount table mount handler.
*
* Filesystem mount table mount handler. Do not call, use the mount call.
*/
int
rtems_nfs_initialize(rtems_filesystem_mount_table_entry_t *mt_entry,
const void *data);
/**
* @brief A utility routine to find the path leading to a
* rtems_filesystem_location_info_t node.
*
* This should really be present in libcsupport...
*
* @param[in] loc The filesystem location to resolve.
*
* @param[out] buf Buffer of length 'len' that receives the path.
*
* @retval 0 on success, RTEMS error code on error.
*/
rtems_status_code
rtems_filesystem_resolve_location(char *buf, int len, rtems_filesystem_location_info_t *loc);
/**
* @brief Set the timeout (initial default: 10s) for NFS and mount calls.
*
* Set the timeout (initial default: 10s) for NFS and mount calls.
*
* @retval 0 on success, nonzero if the requested timeout is less than
* a clock tick or if the system clock rate cannot be determined.
*/
int
nfsSetTimeout(uint32_t timeout_ms);
/** Read current timeout (in milliseconds) */
uint32_t
nfsGetTimeout(void);
#ifdef __cplusplus
}
#endif
/** @} */
#endif
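
The XID and timeout hooks declared above are the parts of this interface that are easy to overlook. The following sketch is illustrative only: it assumes the header is installed as <librtemsNfs.h>, that rpcUdpInit() and nfsInit() have already been called (see the README below), and the seed and timeout values are arbitrary examples.

/* Sketch: seed the RPC XIDs and shorten the RPC timeout.
 * Assumes the RPCIO/NFS driver is already initialized.
 */
#include <inttypes.h>
#include <stdio.h>
#include <time.h>
#include <librtemsNfs.h>

static void
nfs_client_tune(void)
{
	/* Make the upper XID half differ from the previous boot so that
	 * stale replies cannot match new transactions. */
	rpcSetXIDs((uint32_t) time(NULL));

	/* 5 s instead of the initial 10 s default. */
	if (nfsSetTimeout(5000) != 0)
		fprintf(stderr, "timeout rejected, still %" PRIu32 " ms\n",
		        nfsGetTimeout());
}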


@@ -0,0 +1,44 @@
/*
* Authorship
* ----------
* This software (NFS-2 client implementation for RTEMS) was created by
* Till Straumann <strauman@slac.stanford.edu>, 2002-2007,
* Stanford Linear Accelerator Center, Stanford University.
*
* Acknowledgement of sponsorship
* ------------------------------
* The NFS-2 client implementation for RTEMS was produced by
* the Stanford Linear Accelerator Center, Stanford University,
* under Contract DE-AC03-76SFO0515 with the Department of Energy.
*
* Government disclaimer of liability
* ----------------------------------
* Neither the United States nor the United States Department of Energy,
* nor any of their employees, makes any warranty, express or implied, or
* assumes any legal liability or responsibility for the accuracy,
* completeness, or usefulness of any data, apparatus, product, or process
* disclosed, or represents that its use would not infringe privately owned
* rights.
*
* Stanford disclaimer of liability
* --------------------------------
* Stanford University makes no representations or warranties, express or
* implied, nor assumes any liability for the use of this software.
*
* Stanford disclaimer of copyright
* --------------------------------
* Stanford University, owner of the copyright, hereby disclaims its
* copyright and all other rights in this software. Hence, anyone may
* freely use it for any purpose without restriction.
*
* Maintenance of notices
* ----------------------
* In the interest of clarity regarding the origin and status of this
* SLAC software, this and all the preceding Stanford University notices
* are to remain affixed to any copy or derivative of this software made
* or distributed by the recipient and are to be affixed to any copy of
* software made or distributed by the recipient that contains a copy or
* derivative of this software.
*
* ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
*/

rtemsbsd/nfsclient/README

@@ -0,0 +1,548 @@
RTEMS-NFS
=========
A NFS-V2 client implementation for the RTEMS real-time
executive.
Author: Till Straumann <strauman@slac.stanford.edu>, 2002
Copyright 2002, Stanford University and
Till Straumann <strauman@slac.stanford.edu>
Stanford Notice
***************
Acknowledgement of sponsorship
* * * * * * * * * * * * * * * *
This software was produced by the Stanford Linear Accelerator Center,
Stanford University, under Contract DE-AC03-76SFO0515 with the Department
of Energy.
Contents
--------
I Overview
1) Performance
2) Reference Platform / Test Environment
II Usage
1) Initialization
2) Mounting Remote Server Filesystems
3) Unmounting
4) Unloading
5) Dumping Information / Statistics
III Implementation Details
1) RPCIOD
2) NFS
3) RTEMS Resources Used By NFS/RPCIOD
4) Caveats & Bugs
IV Licensing & Disclaimers
I Overview
-----------
This package implements a simple non-caching NFS
client for RTEMS. Most of the system calls are
supported with the exception of 'mount', i.e. it
is not possible to mount another FS on top of NFS
(mostly because of the difficulty that arises when
mount points are deleted on the server). It
shouldn't be hard to do, though.
Note: this client supports NFS vers. 2 / MOUNT vers. 1;
NFS Version 3 or higher are NOT supported.
The package consists of two modules: RPCIOD and NFS
itself.
- RPCIOD is a UDP/RPC multiplexor daemon. It takes
RPC requests from multiple local client threads,
funnels them through a single socket to multiple
servers and dispatches the replies back to the
(blocked) requestor threads.
RPCIOD does packet retransmission and handles
timeouts etc.
Note however, that it does NOT do any XDR
marshalling - it is up to the requestor threads
to do the XDR encoding/decoding. RPCIOD _is_ RPC
specific, though, because its message dispatching
is based on the RPC transaction ID.
- The NFS package maps RTEMS filesystem calls
to proper RPCs, it does the XDR work and
hands marshalled RPC requests to RPCIOD.
All of the calls are synchronous, i.e. they
block until they get a reply.
1) Performance
- - - - - - - -
Performance sucks (due to the lack of
readahead/delayed write and caching). On a fast
(100Mb/s) ethernet, it takes about 20s to copy a
10MB file from NFS to NFS. I found, however, that
vxWorks' NFS client doesn't seem to be any
faster...
Since there is no buffer cache with read-ahead
implemented, all NFS reads are synchronous RPC
calls. Every read operation involves sending a
request and waiting for the reply. As long as the
overhead (sending request + processing it on the
server) is significant compared to the time it
takes to transfer the actual data, increasing
the amount of data per request results in better
throughput. The UDP packet size limit imposes a
limit of 8k per RPC call, hence reading from NFS
in chunks of 8k is better than chunks of 1k [but
chunks >8k are not possible, i.e., simply not
honoured: read(a_nfs_fd, buf, 20000) returns
8192]. This is similar to the old linux days
(mount with rsize=8k). You can let stdio take
care of the buffering or use 8k buffers with
explicit read(2) operations. Note that stdio
honours the file-system's st_blksize field
if newlib is compiled with HAVE_BLKSIZE defined.
In this case, stdio uses 8k buffers for files
on NFS transparently. The blocksize NFS
reports can be tuned with a global variable
setting (see nfs.c for details).
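
As a concrete illustration of the point above, the sketch below copies data
from NFS with explicit 8 KB read(2) calls; the path is a placeholder and the
buffer size mirrors the 8k UDP limit (NFS_MAXDATA in nfs_prot.h).

/* Sketch: read a file from NFS in 8 KB chunks (larger requests
 * would be clipped to 8192 bytes anyway).  The path is a placeholder. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void
read_in_8k_chunks(const char *path)	/* e.g. "/mnt/nfs/data.bin" */
{
	static char buf[8192];		/* == NFS_MAXDATA */
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		/* process n bytes of buf here */
	}
	close(fd);
}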
Further increase of throughput can be achieved
with read-ahead (issuing RPC calls in parallel
[send out request for block n+1 while you are
waiting for data of block n to arrive]). Since
this is not handled by the file system itself, you
would have to code this yourself e.g., using
parallel threads to read from a single file from
interleaved offsets.
Another obvious improvement can be achieved if
processing the data takes a significant amount of
time. Then, having a pipeline of threads for
reading data and processing them makes sense
[thread b processes chunk n while thread a blocks
in read(chunk n+1)].
Some performance figures:
Software: src/nfsTest.c:nfsReadTest() [data not
processed in any way].
Hardware: MVME6100
Network: 100baseT-FD
Server: Linux-2.6/RHEL4-smp [dell precision 420]
File: 10MB
Results:
Single threaded ('normal') NFS read, 1k buffers: 3.46s (2.89MB/s)
Single threaded ('normal') NFS read, 8k buffers: 1.31s (7.63MB/s)
Multi threaded; 2 readers, 8k buffers/xfers: 1.12s (8.9 MB/s)
Multi threaded; 3 readers, 8k buffers/xfers: 1.04s (9.6 MB/s)
2) Reference Platform
- - - - - - - - - - -
RTEMS-NFS was developed and tested on
o RTEMS-ss20020301 (local patches applied)
o PowerPC G3, G4 on Synergy SVGM series board
(custom 'SVGM' BSP, to be released soon)
o PowerPC 604 on MVME23xx
(powerpc/shared/motorola-powerpc BSP)
o Test Environment:
- RTEMS executable running CEXP
- rpciod/nfs dynamically loaded from TFTPfs
- EPICS application dynamically loaded from NFS;
the executing IOC accesses all of its files
on NFS.
II Usage
---------
After linking into the system and proper initialization
(rtems-NFS supports 'magic' module initialization when
loaded into a running system with the CEXP loader),
you are ready for mounting NFSes from a server
(I avoid the term NFS filesystem because NFS already
stands for 'Network File System').
You should also read the
- "RTEMS Resources Used By NFS/RPCIOD"
- "CAVEATS & BUGS"
below.
1) Initialization
- - - - - - - - -
NFS consists of two modules which must be initialized:
a) the RPCIO daemon package; by calling
rpcUdpInit();
note that this step must be performed prior to
initializing NFS:
b) NFS is initialized by calling
nfsInit( smallPoolDepth, bigPoolDepth );
if you supply 0 (zero) values for the pool
depths, the compile-time default configuration
is used which should work fine.
NOTE: when using CEXP to load these modules into a
running system, initialization will be performed
automagically.
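
For systems that do not use CEXP, the two steps above boil down to the
following sketch; the daemon priority is an optional, arbitrary example
value and the header name <librtemsNfs.h> is an assumption.

/* Sketch: explicit RPCIOD/NFS initialization (order matters). */
#include <stdio.h>
#include <librtemsNfs.h>

static int
nfs_client_init(void)
{
	rpciodPriority = 100;		/* optional; must precede rpcUdpInit() */

	if (rpcUdpInit() != 0) {	/* a) the RPCIO daemon first */
		fprintf(stderr, "rpcUdpInit() failed\n");
		return -1;
	}
	if (nfsInit(0, 0) != 0) {	/* b) NFS with default pool depths */
		fprintf(stderr, "nfsInit() failed\n");
		return -1;
	}
	return 0;
}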
2) Mounting Remote Server Filesystems
- - - - - - - - - - - - - - - - - - -
There are two interfaces for mounting an NFS:
- The (non-POSIX) RTEMS 'mount()' call:
mount( &mount_table_entry_pointer,
&filesystem_operations_table_pointer,
options,
device,
mount_point )
Note that you must specify a 'mount_table_entry_pointer'
(use a dummy) - RTEMS' mount() doesn't grok a NULL for
the first argument.
o for the 'filesystem_operations_table_pointer', supply
&nfs_fs_ops
o options are constants (see RTEMS headers) for specifying
read-only / read-write mounts.
o the 'device' string specifies the remote filesystem
that is to be mounted. NFS expects a string conforming
to the following format (EBNF syntax):
[ <uid> '.' <gid> '@' ] <hostip> ':' <path>
The first optional part of the string allows you
to specify the credentials to be used for all
subsequent transactions with this server. If this
part is omitted, the EUID/EGID of the executing
thread (i.e. the thread performing the 'mount') are
used - NFS will still 'remember' these values and use
them for all future communication with this server.
The <hostip> part denotes the server IP address
in standard 'dot' notation. It is followed by
a colon and the (absolute) path on the server.
Note that the string must not contain any extra
characters or whitespace. Example 'device' strings
are:
"300.99@192.168.44.3:/remote/rtems/root"
"192.168.44.3:/remote/rtems/root"
o the 'mount_point' string identifies the local
directory (most probably on IMFS) where the NFS
is to be mounted. Note that the mount point must
already exist with proper permissions.
- Alternate 'mount' interface. NFS offers a more
convenient wrapper taking three string arguments:
nfsMount(uidgid_at_host, server_path, mount_point)
This interface does DNS lookup (see reentrancy note
below) and creates the mount point if necessary.
o the first argument specifies the server and
optionally the uid/gid to be used for authentication.
The semantics are exactly as described above:
[ <uid> '.' <gid> '@' ] <host>
The <host> part may be either a host _name_ or
an IP address in 'dot' notation. In the former
case, nfsMount() uses 'gethostbyname()' to do
a DNS lookup.
IMPORTANT NOTE: gethostbyname() is NOT reentrant/
thread-safe and 'nfsMount()' (if not provided with an
IP/dot address string) is hence subject to race conditions.
o the 'server_path' and 'mount_point' arguments
are described above.
NOTE: If the mount point does not exist yet,
nfsMount() tries to create it.
o if nfsMount() is called with a NULL 'uidgid_at_host'
argument, it lists all currently mounted NFS.
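
A sketch of the wrapper interface described above; the prototype is
reproduced from this README (the actual declaration lives in the NFS driver
sources), the return value is assumed to be 0 on success, and host,
credentials and paths are placeholders.

/* Sketch: mount a remote export with the nfsMount() wrapper. */
#include <stdio.h>

/* prototype as described in this README */
int nfsMount(char *uidhost, char *path, char *mntpoint);

static void
mount_example(void)
{
	/* uid 300 / gid 99 are used for all transactions with this server */
	if (nfsMount("300.99@192.168.44.3", "/remote/rtems/root",
	             "/mnt/nfs") != 0)
		fprintf(stderr, "nfsMount() failed\n");

	/* a NULL first argument just lists the currently mounted NFS */
	nfsMount(NULL, NULL, NULL);
}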
3) Unmounting
- - - - - - -
An NFS can be unmounted using the RTEMS 'unmount()'
call (yep, it is unmount() - not umount()):
unmount(mount_point)
Note that you _must_ supply the mount point (string
argument). It is _not_ possible to specify the
'mountee' when unmounting. NFS implements no
convenience wrapper for this (yet), essentially because
(although this sounds unbelievable) it is non-trivial
to look up the path leading to an RTEMS filesystem
directory node.
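
A sketch of the above; unmount() is the RTEMS call mentioned here
(typically made available via <rtems/libio.h>) and the mount point is a
placeholder.

/* Sketch: unmount an NFS by its mount point (not by the 'mountee'). */
#include <stdio.h>
#include <rtems/libio.h>	/* assumed home of unmount() */

static void
unmount_example(void)
{
	if (unmount("/mnt/nfs") != 0)
		fprintf(stderr, "unmount() failed (still in use?)\n");
}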
4) Unloading
- - - - - - -
After unmounting all NFS from the system, the NFS
and RPCIOD modules may be stopped and unloaded.
Just call 'nfsCleanup()' and 'rpcUdpCleanup()'
in this order. You should evaluate the return value
of these routines which is non-zero if either
of them refuses to yield (e.g. because there are
still mounted filesystems).
Again, when unloading is done by CEXP this is
transparently handled.
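
A sketch of the shutdown order described above, again assuming the header
is installed as <librtemsNfs.h>.

/* Sketch: orderly shutdown after all NFS have been unmounted. */
#include <stdio.h>
#include <librtemsNfs.h>

static void
nfs_client_shutdown(void)
{
	if (nfsCleanup() != 0) {	/* refuses while mounts still exist */
		fprintf(stderr, "NFS still in use\n");
		return;
	}
	if (rpcUdpCleanup() != 0)
		fprintf(stderr, "RPCIOD still in use\n");
}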
5) Dumping Information / Statistics
- - - - - - - - - - - - - - - - - -
Rudimentary RPCIOD statistics are printed
to a file (stdout when NULL) by
int rpcUdpStats(FILE *f)
A list of all currently mounted NFS can be
printed to a file (stdout if NULL) using
int nfsMountsShow(FILE *f)
For convenience, this routine is also called
by nfsMount() when supplying NULL arguments.
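
A sketch of the two dump routines; rpcUdpStats() is prototyped here as
described in this README, while nfsMountsShow() is declared in the header
shown earlier.

/* Sketch: dump RPCIOD statistics and the NFS mount list to stdout. */
#include <stdio.h>
#include <librtemsNfs.h>

int rpcUdpStats(FILE *f);	/* prototype as described in this README */

static void
nfs_client_show(void)
{
	rpcUdpStats(NULL);	/* NULL selects stdout */
	nfsMountsShow(NULL);	/* ditto */
}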
III Implementation Details
--------------------------
1) RPCIOD
- - - - -
RPCIOD was created to
a) avoid non-reentrant librpc calls.
b) support 'asynchronous' operation over a single
socket.
RPCIOD is a daemon thread handling 'transaction objects'
(XACTs) through an UDP socket. XACTs are marshalled RPC
calls/replies associated with RPC servers and requestor
threads.
[ASCII-art diagram, garbled in this rendering: requestor threads drop XACTs
into RPCIOD's message queue; RPCIOD exchanges the packets with the network
through a single socket and handles timeout/(re)transmission.]
A requestor thread drops a transaction into
the message queue and goes to sleep. The XACT is
picked up by rpciod who is listening for events from
three sources:
o the request queue
o packet arrival at the socket
o timeouts
RPCIOD sends the XACT to its destination server and
enqueues the pending XACT into an ordered list of
outstanding transactions.
When a packet arrives, RPCIOD (based on the RPC transaction
ID) looks up the matching XACT and wakes up the requestor
who can then XDR-decode the RPC results found in the XACT
object's buffer.
When a timeout expires, RPCIOD examines the outstanding
XACT that is responsible for the timeout. If its lifetime
has not expired yet, RPCIOD resends the request. Otherwise,
the XACT's error status is set and the requestor is woken up.
RPCIOD dynamically adjusts the retransmission intervals
based on the average round-trip time measured (on a per-server
basis).
Having the requestors event driven (rather than blocking
e.g. on a semaphore) is geared to having many different
requestors (one synchronization object per requestor would
be needed otherwise).
Requestors who want to do asynchronous IO need a different
interface which will be added in the future.
1.a) Reentrancy
- - - - - - - -
RPCIOD does no non-reentrant librpc calls.
1.b) Efficiency
- - - - - - - -
We shouldn't bother about efficiency until pipelining (read-ahead/
delayed write) and caching are implemented. The round-trip delay
associated with every single RPC transaction clearly is a big
performance killer.
Nevertheless, I could not withstand the temptation to eliminate
the extra copy step involved with socket IO:
A user data object has to be XDR encoded into a buffer. The
buffer is then given to the socket, where it is copied into MBUFs.
(The network chip driver might even do more copying).
Likewise, on reception 'recvfrom' copies MBUFS into a user
buffer which is XDR decoded into the final user data object.
Eliminating the copying into (possibly multiple) MBUFS by
'sendto()' is actually a piece of cake. RPCIOD uses the
'sosend()' routine [properly wrapped] supplying a single
MBUF header that directly points to the marshalled buffer
:-)
Getting rid of the extra copy on reception was (only a little)
harder: I derived an 'XDR-mbuf' stream from SUN's xdr_mem which
allows for XDR-decoding out of an MBUF chain that is obtained by
soreceive().
2) NFS
- - - -
The actual NFS implementation is straightforward and essentially
'passive' (no threads created). Any RTEMS task executing a
filesystem call dispatched to NFS (such as 'opendir()', 'lseek()'
or 'unlink()') ends up XDR encoding arguments, dropping a
XACT into RPCIOD's message queue and going to sleep.
When woken up by RPCIOD, the XACT is decoded (using the XDR-mbuf
stream mentioned above) and the properly cooked-up results are
returned.
3) RTEMS Resources Used By NFS/RPCIOD
- - - - - - - - - - - - - - - - - - -
The RPCIOD/NFS package uses the following resources. Some
parameters are compile-time configurable - consult the
source files for details.
RPCIOD:
o 1 task
o 1 message queue
o 1 socket/filedescriptor
o 2 semaphores (a third one is temporarily created during
rpcUdpCleanup()).
o 1 RTEMS EVENT (by default RTEMS_EVENT_30).
IMPORTANT: this event is used by _every_ thread executing
NFS system calls and hence is RESERVED.
o 3 events only used by RPCIOD itself, i.e. these must not
be sent to RPCIOD by any other thread (except for the intended
use, of course). The events involved are 1,2,3.
o preemption disabled sections: NONE
o sections with interrupts disabled: NONE
o NO 'timers' are used (timer code would run in IRQ context)
o memory usage: n.a
NFS:
o 2 message queues
o 2 semaphores
o 1 semaphore per mounted NFS
o 1 slot in driver entry table (for major number)
o preemption disabled sections: NONE
o sections with interrupts disabled: NONE
o 1 task + 1 semaphore temporarily created when
listing mounted filesystems (rtems_filesystem_resolve_location())
4) CAVEATS & BUGS
- - - - - - - - -
Unfortunately, some bugs crawl around in the filesystem generics.
(Some of them might already be fixed in versions later than
rtems-ss-20020301).
I recommend using the patch distributed with RTEMS-NFS.
o RTEMS uses/used (Joel said it has been fixed already) a 'short'
ino_t which is not enough for NFS.
The driver detects this problem and enables a workaround. In rare
situations (mainly involving 'getcwd()') improper inode comparison
may result (due to the restricted size, stat() returns st_ino modulo
2^16). In most cases, however, st_dev is compared along with st_ino
which will give correct results (different files may yield identical
st_ino but they will have different st_dev). However, there is
code (in getcwd(), for example) that assumes that files residing
in one directory must be hosted by the same device and hence omits
the st_dev comparison. In such a case, the workaround will fail.
NOTE: changing the size (sys/types.h) of ino_t from 'short' to 'long'
is strongly recommended. It is NOT included in the patch, however
as this is a major change requiring ALL of your sources to
be recompiled.
THE ino_t SIZE IS FIXED IN GCC-3.2/NEWLIB-1.10.0-2 DISTRIBUTED BY
OAR.
o You may work around most filesystem bugs by observing the following
rules:
* never use chroot() (fixed by the patch)
* never use getpwent(), getgrent() & friends - they are NOT THREAD
safe (fixed by the patch)
* NEVER use rtems_libio_share_private_env() - not even with the
patch applied. Just DONT - it is broken by design.
* All threads that have their own userenv (i.e. that have called
rtems_libio_set_private_env()) SHOULD 'chdir("/")' before
terminating. Otherwise, (i.e. if their cwd is on NFS), it will
be impossible to unmount the NFS involved.
o The patch slightly changes the semantics of 'getpwent()' and
'getgrent()' & friends (to what is IMHO correct anyways - the patch is
also needed to fix another problem, however): with the patch applied,
the passwd and group files are always accessed from the 'current' user
environment, i.e. a thread that has changed its 'root' or 'uid' might
not be able to access these files anymore.
o NOTE: RTEMS 'mount()' / 'unmount()' are NOT THREAD SAFE.
o The NFS protocol has no 'append' or 'seek_end' primitive. The client
must query the current file size (this client uses cached info) and
change the local file pointer accordingly (in 'O_APPEND' mode).
Obviously, this involves a race condition and hence multiple clients
writing the same file may lead to corruption.
IV Licensing & Disclaimers
--------------------------
NFS is distributed under the SLAC License - consult the
separate 'LICENSE' file.
Government disclaimer of liability
- - - - - - - - - - - - - - - - -
Neither the United States nor the United States Department of Energy,
nor any of their employees, makes any warranty, express or implied,
or assumes any legal liability or responsibility for the accuracy,
completeness, or usefulness of any data, apparatus, product, or process
disclosed, or represents that its use would not infringe privately
owned rights.
Stanford disclaimer of liability
- - - - - - - - - - - - - - - - -
Stanford University makes no representations or warranties, express or
implied, nor assumes any liability for the use of this software.
Maintenance of notice
- - - - - - - - - - -
In the interest of clarity regarding the origin and status of this
software, Stanford University requests that any recipient of it maintain
this notice affixed to any distribution by the recipient that contains a
copy or derivative of this software.


@@ -0,0 +1,152 @@
/**
* @file
*
* @brief Nfsclient Mount Prot
*
* @ingroup rtems-nfsclient
*/
#ifndef _MOUNT_PROT_H_RPCGEN
#define _MOUNT_PROT_H_RPCGEN
#include <rpc/rpc.h>
/**
* @defgroup libfs_nfsclient_mount_prot Mount Prot
*
* @ingroup libfs
*/
/**@{*/
#ifdef __cplusplus
extern "C" {
#endif
#define MNTPATHLEN 1024
#define MNTNAMLEN 255
#define FHSIZE 32
typedef char fhandle[FHSIZE];
struct fhstatus {
u_int fhs_status;
union {
fhandle fhs_fhandle;
} fhstatus_u;
};
typedef struct fhstatus fhstatus;
typedef char *dirpath;
typedef char *name;
typedef struct mountbody *mountlist;
struct mountbody {
name ml_hostname;
dirpath ml_directory;
mountlist ml_next;
};
typedef struct mountbody mountbody;
typedef struct groupnode *groups;
struct groupnode {
name gr_name;
groups gr_next;
};
typedef struct groupnode groupnode;
typedef struct exportnode *exports;
struct exportnode {
dirpath ex_dir;
groups ex_groups;
exports ex_next;
};
typedef struct exportnode exportnode;
#define MOUNTPROG 100005
#define MOUNTVERS 1
#if defined(__STDC__) || defined(__cplusplus)
#define MOUNTPROC_NULL 0
extern void * mountproc_null_1(void *, CLIENT *);
extern void * mountproc_null_1_svc(void *, struct svc_req *);
#define MOUNTPROC_MNT 1
extern fhstatus * mountproc_mnt_1(dirpath *, CLIENT *);
extern fhstatus * mountproc_mnt_1_svc(dirpath *, struct svc_req *);
#define MOUNTPROC_DUMP 2
extern mountlist * mountproc_dump_1(void *, CLIENT *);
extern mountlist * mountproc_dump_1_svc(void *, struct svc_req *);
#define MOUNTPROC_UMNT 3
extern void * mountproc_umnt_1(dirpath *, CLIENT *);
extern void * mountproc_umnt_1_svc(dirpath *, struct svc_req *);
#define MOUNTPROC_UMNTALL 4
extern void * mountproc_umntall_1(void *, CLIENT *);
extern void * mountproc_umntall_1_svc(void *, struct svc_req *);
#define MOUNTPROC_EXPORT 5
extern exports * mountproc_export_1(void *, CLIENT *);
extern exports * mountproc_export_1_svc(void *, struct svc_req *);
#define MOUNTPROC_EXPORTALL 6
extern exports * mountproc_exportall_1(void *, CLIENT *);
extern exports * mountproc_exportall_1_svc(void *, struct svc_req *);
extern int mountprog_1_freeresult (SVCXPRT *, xdrproc_t, caddr_t);
#else /* K&R C */
#define MOUNTPROC_NULL 0
extern void * mountproc_null_1();
extern void * mountproc_null_1_svc();
#define MOUNTPROC_MNT 1
extern fhstatus * mountproc_mnt_1();
extern fhstatus * mountproc_mnt_1_svc();
#define MOUNTPROC_DUMP 2
extern mountlist * mountproc_dump_1();
extern mountlist * mountproc_dump_1_svc();
#define MOUNTPROC_UMNT 3
extern void * mountproc_umnt_1();
extern void * mountproc_umnt_1_svc();
#define MOUNTPROC_UMNTALL 4
extern void * mountproc_umntall_1();
extern void * mountproc_umntall_1_svc();
#define MOUNTPROC_EXPORT 5
extern exports * mountproc_export_1();
extern exports * mountproc_export_1_svc();
#define MOUNTPROC_EXPORTALL 6
extern exports * mountproc_exportall_1();
extern exports * mountproc_exportall_1_svc();
extern int mountprog_1_freeresult ();
#endif /* K&R C */
/* the xdr functions */
#if defined(__STDC__) || defined(__cplusplus)
extern bool_t xdr_fhandle (XDR *, fhandle);
extern bool_t xdr_fhstatus (XDR *, fhstatus*);
extern bool_t xdr_dirpath (XDR *, dirpath*);
extern bool_t xdr_name (XDR *, name*);
extern bool_t xdr_mountlist (XDR *, mountlist*);
extern bool_t xdr_mountbody (XDR *, mountbody*);
extern bool_t xdr_groups (XDR *, groups*);
extern bool_t xdr_groupnode (XDR *, groupnode*);
extern bool_t xdr_exports (XDR *, exports*);
extern bool_t xdr_exportnode (XDR *, exportnode*);
#else /* K&R C */
extern bool_t xdr_fhandle ();
extern bool_t xdr_fhstatus ();
extern bool_t xdr_dirpath ();
extern bool_t xdr_name ();
extern bool_t xdr_mountlist ();
extern bool_t xdr_mountbody ();
extern bool_t xdr_groups ();
extern bool_t xdr_groupnode ();
extern bool_t xdr_exports ();
extern bool_t xdr_exportnode ();
#endif /* K&R C */
#ifdef __cplusplus
}
#endif
/**@}*/
#endif /* !_MOUNT_PROT_H_RPCGEN */


@@ -0,0 +1,161 @@
/* @(#)mount.x 2.1 88/08/01 4.0 RPCSRC */
/* @(#)mount.x 1.2 87/09/18 Copyr 1987 Sun Micro */
/*
* Sun RPC is a product of Sun Microsystems, Inc. and is provided for
* unrestricted use provided that this legend is included on all tape
* media and as a part of the software program in whole or part. Users
* may copy or modify Sun RPC without charge, but are not authorized
* to license or distribute it to anyone else except as part of a product or
* program developed by the user.
*
* SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
* WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
* PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
*
* Sun RPC is provided with no support and without any obligation on the
* part of Sun Microsystems, Inc. to assist in its use, correction,
* modification or enhancement.
*
* SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
* INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
* OR ANY PART THEREOF.
*
* In no event will Sun Microsystems, Inc. be liable for any lost revenue
* or profits or other special, indirect and consequential damages, even if
* Sun has been advised of the possibility of such damages.
*
* Sun Microsystems, Inc.
* 2550 Garcia Avenue
* Mountain View, California 94043
*/
/*
* Protocol description for the mount program
*/
const MNTPATHLEN = 1024; /* maximum bytes in a pathname argument */
const MNTNAMLEN = 255; /* maximum bytes in a name argument */
const FHSIZE = 32; /* size in bytes of a file handle */
/*
* The fhandle is the file handle that the server passes to the client.
* All file operations are done using the file handles to refer to a file
* or a directory. The file handle can contain whatever information the
* server needs to distinguish an individual file.
*/
typedef opaque fhandle[FHSIZE];
/*
* If a status of zero is returned, the call completed successfully, and
* a file handle for the directory follows. A non-zero status indicates
* some sort of error. The status corresponds with UNIX error numbers.
*/
union fhstatus switch (unsigned fhs_status) {
case 0:
fhandle fhs_fhandle;
default:
void;
};
/*
* The type dirpath is the pathname of a directory
*/
typedef string dirpath<MNTPATHLEN>;
/*
* The type name is used for arbitrary names (hostnames, groupnames)
*/
typedef string name<MNTNAMLEN>;
/*
* A list of who has what mounted
*/
typedef struct mountbody *mountlist;
struct mountbody {
name ml_hostname;
dirpath ml_directory;
mountlist ml_next;
};
/*
* A list of netgroups
*/
typedef struct groupnode *groups;
struct groupnode {
name gr_name;
groups gr_next;
};
/*
* A list of what is exported and to whom
*/
typedef struct exportnode *exports;
struct exportnode {
dirpath ex_dir;
groups ex_groups;
exports ex_next;
};
program MOUNTPROG {
/*
* Version one of the mount protocol communicates with version two
* of the NFS protocol. The only connecting point is the fhandle
* structure, which is the same for both protocols.
*/
version MOUNTVERS {
/*
* Does no work. It is made available in all RPC services
* to allow server response testing and timing
*/
void
MOUNTPROC_NULL(void) = 0;
/*
* If fhs_status is 0, then fhs_fhandle contains the
* file handle for the directory. This file handle may
* be used in the NFS protocol. This procedure also adds
* a new entry to the mount list for this client mounting
* the directory.
* Unix authentication required.
*/
fhstatus
MOUNTPROC_MNT(dirpath) = 1;
/*
* Returns the list of remotely mounted filesystems. The
* mountlist contains one entry for each hostname and
* directory pair.
*/
mountlist
MOUNTPROC_DUMP(void) = 2;
/*
* Removes the mount list entry for the directory
* Unix authentication required.
*/
void
MOUNTPROC_UMNT(dirpath) = 3;
/*
* Removes all of the mount list entries for this client
* Unix authentication required.
*/
void
MOUNTPROC_UMNTALL(void) = 4;
/*
* Returns a list of all the exported filesystems, and which
* machines are allowed to import it.
*/
exports
MOUNTPROC_EXPORT(void) = 5;
/*
* Identical to MOUNTPROC_EXPORT above
*/
exports
MOUNTPROC_EXPORTALL(void) = 6;
} = 1;
} = 100005;
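
The procedures defined above can also be exercised with rpcgen-generated
client stubs over the standard Sun RPC CLIENT transport (RTEMS-NFS itself
drives the mount daemon through RPCIOD instead). The sketch below is a
hedged illustration: it assumes generated client stubs for these procedures
are available and linked in (they are not shown here), and the host name is
a placeholder.

/* Sketch: list a server's exports with the MOUNT V1 client stub.
 * Uses the standard Sun RPC CLIENT interface, not RPCIOD; the host
 * name is a placeholder and the client stub (not shown here) must
 * be linked in.
 */
#include <stdio.h>
#include <rpc/rpc.h>
#include "mount_prot.h"

static void
show_exports(char *host)
{
	CLIENT  *clnt = clnt_create(host, MOUNTPROG, MOUNTVERS, "udp");
	exports *res;
	exports  e;

	if (clnt == NULL) {
		clnt_pcreateerror(host);
		return;
	}
	res = mountproc_export_1(NULL, clnt);	/* void argument */
	if (res == NULL) {
		clnt_perror(clnt, "MOUNTPROC_EXPORT");
	} else {
		for (e = *res; e != NULL; e = e->ex_next)
			printf("%s\n", e->ex_dir);
	}
	clnt_destroy(clnt);
}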


@@ -0,0 +1,111 @@
/**
* @file
*
* @brief Mount Prot XDR
* @ingroup libfs_nfsclient_mount_prot Mount Prot
*/
/*
* Please do not edit this file.
* It was generated using rpcgen.
*/
#include "mount_prot.h"
bool_t
xdr_fhandle (XDR *xdrs, fhandle objp)
{
if (!xdr_opaque (xdrs, objp, FHSIZE))
return FALSE;
return TRUE;
}
bool_t
xdr_fhstatus (XDR *xdrs, fhstatus *objp)
{
if (!xdr_u_int (xdrs, &objp->fhs_status))
return FALSE;
switch (objp->fhs_status) {
case 0:
if (!xdr_fhandle (xdrs, objp->fhstatus_u.fhs_fhandle))
return FALSE;
break;
default:
break;
}
return TRUE;
}
bool_t
xdr_dirpath (XDR *xdrs, dirpath *objp)
{
if (!xdr_string (xdrs, objp, MNTPATHLEN))
return FALSE;
return TRUE;
}
bool_t
xdr_name (XDR *xdrs, name *objp)
{
if (!xdr_string (xdrs, objp, MNTNAMLEN))
return FALSE;
return TRUE;
}
bool_t
xdr_mountlist (XDR *xdrs, mountlist *objp)
{
if (!xdr_pointer (xdrs, (char **)objp, sizeof (struct mountbody), (xdrproc_t) xdr_mountbody))
return FALSE;
return TRUE;
}
bool_t
xdr_mountbody (XDR *xdrs, mountbody *objp)
{
if (!xdr_name (xdrs, &objp->ml_hostname))
return FALSE;
if (!xdr_dirpath (xdrs, &objp->ml_directory))
return FALSE;
if (!xdr_mountlist (xdrs, &objp->ml_next))
return FALSE;
return TRUE;
}
bool_t
xdr_groups (XDR *xdrs, groups *objp)
{
if (!xdr_pointer (xdrs, (char **)objp, sizeof (struct groupnode), (xdrproc_t) xdr_groupnode))
return FALSE;
return TRUE;
}
bool_t
xdr_groupnode (XDR *xdrs, groupnode *objp)
{
if (!xdr_name (xdrs, &objp->gr_name))
return FALSE;
if (!xdr_groups (xdrs, &objp->gr_next))
return FALSE;
return TRUE;
}
bool_t
xdr_exports (XDR *xdrs, exports *objp)
{
if (!xdr_pointer (xdrs, (char **)objp, sizeof (struct exportnode), (xdrproc_t) xdr_exportnode))
return FALSE;
return TRUE;
}
bool_t
xdr_exportnode (XDR *xdrs, exportnode *objp)
{
if (!xdr_dirpath (xdrs, &objp->ex_dir))
return FALSE;
if (!xdr_groups (xdrs, &objp->ex_groups))
return FALSE;
if (!xdr_exports (xdrs, &objp->ex_next))
return FALSE;
return TRUE;
}

rtemsbsd/nfsclient/nfs.c

File diff suppressed because it is too large.


@@ -0,0 +1,460 @@
/*
* Please do not edit this file.
* It was generated using rpcgen.
*/
#ifndef _NFS_PROT_H_RPCGEN
#define _NFS_PROT_H_RPCGEN
#include <rpc/rpc.h>
/**
* @defgroup libfs_nfsclient_nfs_prot NFS Prot
*
* @ingroup libfs
*/
/**@{*/
#ifdef __cplusplus
extern "C" {
#endif
#define NFS_PORT 2049
#define NFS_MAXDATA 8192
#define NFS_MAXPATHLEN 1024
#define NFS_MAXNAMLEN 255
#define NFS_FHSIZE 32
#define NFS_COOKIESIZE 4
#define NFS_FIFO_DEV -1
#define NFSMODE_FMT 0170000
#define NFSMODE_DIR 0040000
#define NFSMODE_CHR 0020000
#define NFSMODE_BLK 0060000
#define NFSMODE_REG 0100000
#define NFSMODE_LNK 0120000
#define NFSMODE_SOCK 0140000
#define NFSMODE_FIFO 0010000
enum nfsstat {
NFS_OK = 0,
NFSERR_PERM = 1,
NFSERR_NOENT = 2,
NFSERR_IO = 5,
NFSERR_NXIO = 6,
NFSERR_ACCES = 13,
NFSERR_EXIST = 17,
NFSERR_NODEV = 19,
NFSERR_NOTDIR = 20,
NFSERR_ISDIR = 21,
NFSERR_FBIG = 27,
NFSERR_NOSPC = 28,
NFSERR_ROFS = 30,
NFSERR_NAMETOOLONG = 63,
NFSERR_NOTEMPTY = 66,
NFSERR_DQUOT = 69,
NFSERR_STALE = 70,
NFSERR_WFLUSH = 99,
_NFSSTAT = 0xffffffff
};
typedef enum nfsstat nfsstat;
enum ftype {
NFNON = 0,
NFREG = 1,
NFDIR = 2,
NFBLK = 3,
NFCHR = 4,
NFLNK = 5,
NFSOCK = 6,
NFBAD = 7,
NFFIFO = 8,
_FTYPE = 0xffffffff
};
typedef enum ftype ftype;
struct nfs_fh {
char data[NFS_FHSIZE];
};
typedef struct nfs_fh nfs_fh;
struct nfstime {
u_int seconds;
u_int useconds;
};
typedef struct nfstime nfstime;
struct fattr {
ftype type;
u_int mode;
u_int nlink;
u_int uid;
u_int gid;
u_int size;
u_int blocksize;
u_int rdev;
u_int blocks;
u_int fsid;
u_int fileid;
nfstime atime;
nfstime mtime;
nfstime ctime;
};
typedef struct fattr fattr;
struct sattr {
u_int mode;
u_int uid;
u_int gid;
u_int size;
nfstime atime;
nfstime mtime;
};
typedef struct sattr sattr;
typedef char *filename;
typedef char *nfspath;
struct attrstat {
nfsstat status;
union {
fattr attributes;
} attrstat_u;
};
typedef struct attrstat attrstat;
struct sattrargs {
nfs_fh file;
sattr attributes;
};
typedef struct sattrargs sattrargs;
struct diropargs {
nfs_fh dir;
filename name;
};
typedef struct diropargs diropargs;
struct diropokres {
nfs_fh file;
fattr attributes;
};
typedef struct diropokres diropokres;
struct diropres {
nfsstat status;
union {
diropokres diropres;
} diropres_u;
};
typedef struct diropres diropres;
struct readlinkres {
nfsstat status;
union {
nfspath data;
} readlinkres_u;
};
typedef struct readlinkres readlinkres;
struct readargs {
nfs_fh file;
u_int offset;
u_int count;
u_int totalcount;
};
typedef struct readargs readargs;
struct readokres {
fattr attributes;
struct {
u_int data_len;
char *data_val;
} data;
};
typedef struct readokres readokres;
struct readres {
nfsstat status;
union {
readokres reply;
} readres_u;
};
typedef struct readres readres;
struct writeargs {
nfs_fh file;
u_int beginoffset;
u_int offset;
u_int totalcount;
struct {
u_int data_len;
char *data_val;
} data;
};
typedef struct writeargs writeargs;
struct createargs {
diropargs where;
sattr attributes;
};
typedef struct createargs createargs;
struct renameargs {
diropargs from;
diropargs to;
};
typedef struct renameargs renameargs;
struct linkargs {
nfs_fh from;
diropargs to;
};
typedef struct linkargs linkargs;
struct symlinkargs {
diropargs from;
nfspath to;
sattr attributes;
};
typedef struct symlinkargs symlinkargs;
struct nfscookie {
char data[NFS_COOKIESIZE];
};
typedef struct nfscookie nfscookie;
struct readdirargs {
nfs_fh dir;
nfscookie cookie;
u_int count;
};
typedef struct readdirargs readdirargs;
struct entry {
u_int fileid;
filename name;
nfscookie cookie;
struct entry *nextentry;
};
typedef struct entry entry;
struct dirlist {
entry *entries;
bool_t eof;
};
typedef struct dirlist dirlist;
struct readdirres {
nfsstat status;
union {
dirlist reply;
} readdirres_u;
};
typedef struct readdirres readdirres;
struct statfsokres {
u_int tsize;
u_int bsize;
u_int blocks;
u_int bfree;
u_int bavail;
};
typedef struct statfsokres statfsokres;
struct statfsres {
nfsstat status;
union {
statfsokres reply;
} statfsres_u;
};
typedef struct statfsres statfsres;
#define NFS_PROGRAM 100003
#define NFS_VERSION 2
#if defined(__STDC__) || defined(__cplusplus)
#define NFSPROC_NULL 0
extern void * nfsproc_null_2(void *, CLIENT *);
extern void * nfsproc_null_2_svc(void *, struct svc_req *);
#define NFSPROC_GETATTR 1
extern attrstat * nfsproc_getattr_2(nfs_fh *, CLIENT *);
extern attrstat * nfsproc_getattr_2_svc(nfs_fh *, struct svc_req *);
#define NFSPROC_SETATTR 2
extern attrstat * nfsproc_setattr_2(sattrargs *, CLIENT *);
extern attrstat * nfsproc_setattr_2_svc(sattrargs *, struct svc_req *);
#define NFSPROC_ROOT 3
extern void * nfsproc_root_2(void *, CLIENT *);
extern void * nfsproc_root_2_svc(void *, struct svc_req *);
#define NFSPROC_LOOKUP 4
extern diropres * nfsproc_lookup_2(diropargs *, CLIENT *);
extern diropres * nfsproc_lookup_2_svc(diropargs *, struct svc_req *);
#define NFSPROC_READLINK 5
extern readlinkres * nfsproc_readlink_2(nfs_fh *, CLIENT *);
extern readlinkres * nfsproc_readlink_2_svc(nfs_fh *, struct svc_req *);
#define NFSPROC_READ 6
extern readres * nfsproc_read_2(readargs *, CLIENT *);
extern readres * nfsproc_read_2_svc(readargs *, struct svc_req *);
#define NFSPROC_WRITECACHE 7
extern void * nfsproc_writecache_2(void *, CLIENT *);
extern void * nfsproc_writecache_2_svc(void *, struct svc_req *);
#define NFSPROC_WRITE 8
extern attrstat * nfsproc_write_2(writeargs *, CLIENT *);
extern attrstat * nfsproc_write_2_svc(writeargs *, struct svc_req *);
#define NFSPROC_CREATE 9
extern diropres * nfsproc_create_2(createargs *, CLIENT *);
extern diropres * nfsproc_create_2_svc(createargs *, struct svc_req *);
#define NFSPROC_REMOVE 10
extern nfsstat * nfsproc_remove_2(diropargs *, CLIENT *);
extern nfsstat * nfsproc_remove_2_svc(diropargs *, struct svc_req *);
#define NFSPROC_RENAME 11
extern nfsstat * nfsproc_rename_2(renameargs *, CLIENT *);
extern nfsstat * nfsproc_rename_2_svc(renameargs *, struct svc_req *);
#define NFSPROC_LINK 12
extern nfsstat * nfsproc_link_2(linkargs *, CLIENT *);
extern nfsstat * nfsproc_link_2_svc(linkargs *, struct svc_req *);
#define NFSPROC_SYMLINK 13
extern nfsstat * nfsproc_symlink_2(symlinkargs *, CLIENT *);
extern nfsstat * nfsproc_symlink_2_svc(symlinkargs *, struct svc_req *);
#define NFSPROC_MKDIR 14
extern diropres * nfsproc_mkdir_2(createargs *, CLIENT *);
extern diropres * nfsproc_mkdir_2_svc(createargs *, struct svc_req *);
#define NFSPROC_RMDIR 15
extern nfsstat * nfsproc_rmdir_2(diropargs *, CLIENT *);
extern nfsstat * nfsproc_rmdir_2_svc(diropargs *, struct svc_req *);
#define NFSPROC_READDIR 16
extern readdirres * nfsproc_readdir_2(readdirargs *, CLIENT *);
extern readdirres * nfsproc_readdir_2_svc(readdirargs *, struct svc_req *);
#define NFSPROC_STATFS 17
extern statfsres * nfsproc_statfs_2(nfs_fh *, CLIENT *);
extern statfsres * nfsproc_statfs_2_svc(nfs_fh *, struct svc_req *);
extern int nfs_program_2_freeresult (SVCXPRT *, xdrproc_t, caddr_t);
#else /* K&R C */
#define NFSPROC_NULL 0
extern void * nfsproc_null_2();
extern void * nfsproc_null_2_svc();
#define NFSPROC_GETATTR 1
extern attrstat * nfsproc_getattr_2();
extern attrstat * nfsproc_getattr_2_svc();
#define NFSPROC_SETATTR 2
extern attrstat * nfsproc_setattr_2();
extern attrstat * nfsproc_setattr_2_svc();
#define NFSPROC_ROOT 3
extern void * nfsproc_root_2();
extern void * nfsproc_root_2_svc();
#define NFSPROC_LOOKUP 4
extern diropres * nfsproc_lookup_2();
extern diropres * nfsproc_lookup_2_svc();
#define NFSPROC_READLINK 5
extern readlinkres * nfsproc_readlink_2();
extern readlinkres * nfsproc_readlink_2_svc();
#define NFSPROC_READ 6
extern readres * nfsproc_read_2();
extern readres * nfsproc_read_2_svc();
#define NFSPROC_WRITECACHE 7
extern void * nfsproc_writecache_2();
extern void * nfsproc_writecache_2_svc();
#define NFSPROC_WRITE 8
extern attrstat * nfsproc_write_2();
extern attrstat * nfsproc_write_2_svc();
#define NFSPROC_CREATE 9
extern diropres * nfsproc_create_2();
extern diropres * nfsproc_create_2_svc();
#define NFSPROC_REMOVE 10
extern nfsstat * nfsproc_remove_2();
extern nfsstat * nfsproc_remove_2_svc();
#define NFSPROC_RENAME 11
extern nfsstat * nfsproc_rename_2();
extern nfsstat * nfsproc_rename_2_svc();
#define NFSPROC_LINK 12
extern nfsstat * nfsproc_link_2();
extern nfsstat * nfsproc_link_2_svc();
#define NFSPROC_SYMLINK 13
extern nfsstat * nfsproc_symlink_2();
extern nfsstat * nfsproc_symlink_2_svc();
#define NFSPROC_MKDIR 14
extern diropres * nfsproc_mkdir_2();
extern diropres * nfsproc_mkdir_2_svc();
#define NFSPROC_RMDIR 15
extern nfsstat * nfsproc_rmdir_2();
extern nfsstat * nfsproc_rmdir_2_svc();
#define NFSPROC_READDIR 16
extern readdirres * nfsproc_readdir_2();
extern readdirres * nfsproc_readdir_2_svc();
#define NFSPROC_STATFS 17
extern statfsres * nfsproc_statfs_2();
extern statfsres * nfsproc_statfs_2_svc();
extern int nfs_program_2_freeresult ();
#endif /* K&R C */
/* the xdr functions */
#if defined(__STDC__) || defined(__cplusplus)
extern bool_t xdr_nfsstat (XDR *, nfsstat*);
extern bool_t xdr_ftype (XDR *, ftype*);
extern bool_t xdr_nfs_fh (XDR *, nfs_fh*);
extern bool_t xdr_nfstime (XDR *, nfstime*);
extern bool_t xdr_fattr (XDR *, fattr*);
extern bool_t xdr_sattr (XDR *, sattr*);
extern bool_t xdr_filename (XDR *, filename*);
extern bool_t xdr_nfspath (XDR *, nfspath*);
extern bool_t xdr_attrstat (XDR *, attrstat*);
extern bool_t xdr_sattrargs (XDR *, sattrargs*);
extern bool_t xdr_diropargs (XDR *, diropargs*);
extern bool_t xdr_diropokres (XDR *, diropokres*);
extern bool_t xdr_diropres (XDR *, diropres*);
extern bool_t xdr_readlinkres (XDR *, readlinkres*);
extern bool_t xdr_readargs (XDR *, readargs*);
extern bool_t xdr_readokres (XDR *, readokres*);
extern bool_t xdr_readres (XDR *, readres*);
extern bool_t xdr_writeargs (XDR *, writeargs*);
extern bool_t xdr_createargs (XDR *, createargs*);
extern bool_t xdr_renameargs (XDR *, renameargs*);
extern bool_t xdr_linkargs (XDR *, linkargs*);
extern bool_t xdr_symlinkargs (XDR *, symlinkargs*);
extern bool_t xdr_nfscookie (XDR *, nfscookie*);
extern bool_t xdr_readdirargs (XDR *, readdirargs*);
extern bool_t xdr_entry (XDR *, entry*);
extern bool_t xdr_dirlist (XDR *, dirlist*);
extern bool_t xdr_readdirres (XDR *, readdirres*);
extern bool_t xdr_statfsokres (XDR *, statfsokres*);
extern bool_t xdr_statfsres (XDR *, statfsres*);
#else /* K&R C */
extern bool_t xdr_nfsstat ();
extern bool_t xdr_ftype ();
extern bool_t xdr_nfs_fh ();
extern bool_t xdr_nfstime ();
extern bool_t xdr_fattr ();
extern bool_t xdr_sattr ();
extern bool_t xdr_filename ();
extern bool_t xdr_nfspath ();
extern bool_t xdr_attrstat ();
extern bool_t xdr_sattrargs ();
extern bool_t xdr_diropargs ();
extern bool_t xdr_diropokres ();
extern bool_t xdr_diropres ();
extern bool_t xdr_readlinkres ();
extern bool_t xdr_readargs ();
extern bool_t xdr_readokres ();
extern bool_t xdr_readres ();
extern bool_t xdr_writeargs ();
extern bool_t xdr_createargs ();
extern bool_t xdr_renameargs ();
extern bool_t xdr_linkargs ();
extern bool_t xdr_symlinkargs ();
extern bool_t xdr_nfscookie ();
extern bool_t xdr_readdirargs ();
extern bool_t xdr_entry ();
extern bool_t xdr_dirlist ();
extern bool_t xdr_readdirres ();
extern bool_t xdr_statfsokres ();
extern bool_t xdr_statfsres ();
#endif /* K&R C */
#ifdef __cplusplus
}
#endif
/**@}*/
#endif /* !_NFS_PROT_H_RPCGEN */

File diff suppressed because it is too large.


@@ -0,0 +1,628 @@
/**
* @file
*
* @brief NFS Prot XDR
* @ingroup libfs_nfsclient_nfs_prot NFS Prot
*/
/*
* Please do not edit this file.
* It was generated using rpcgen.
*/
#include "nfs_prot.h"
#ifndef lint
/*static char sccsid[] = "from: @(#)nfs_prot.x 1.2 87/10/12 Copyr 1987 Sun Micro";*/
/*static char sccsid[] = "from: @(#)nfs_prot.x 2.1 88/08/01 4.0 RPCSRC";*/
#if !defined(__rtems__)
static char rcsid[] = "$Id$";
#endif
#endif /* not lint */
bool_t
xdr_nfsstat (XDR *xdrs, nfsstat *objp)
{
if (!xdr_enum (xdrs, (enum_t *) objp))
return FALSE;
return TRUE;
}
bool_t
xdr_ftype (XDR *xdrs, ftype *objp)
{
if (!xdr_enum (xdrs, (enum_t *) objp))
return FALSE;
return TRUE;
}
bool_t
xdr_nfs_fh (XDR *xdrs, nfs_fh *objp)
{
if (!xdr_opaque (xdrs, objp->data, NFS_FHSIZE))
return FALSE;
return TRUE;
}
bool_t
xdr_nfstime (XDR *xdrs, nfstime *objp)
{
if (!xdr_u_int (xdrs, &objp->seconds))
return FALSE;
if (!xdr_u_int (xdrs, &objp->useconds))
return FALSE;
return TRUE;
}
bool_t
xdr_fattr (XDR *xdrs, fattr *objp)
{
register int32_t *buf;
if (xdrs->x_op == XDR_ENCODE) {
if (!xdr_ftype (xdrs, &objp->type))
return FALSE;
buf = XDR_INLINE (xdrs, 10 * BYTES_PER_XDR_UNIT);
if (buf == NULL) {
if (!xdr_u_int (xdrs, &objp->mode))
return FALSE;
if (!xdr_u_int (xdrs, &objp->nlink))
return FALSE;
if (!xdr_u_int (xdrs, &objp->uid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->gid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->size))
return FALSE;
if (!xdr_u_int (xdrs, &objp->blocksize))
return FALSE;
if (!xdr_u_int (xdrs, &objp->rdev))
return FALSE;
if (!xdr_u_int (xdrs, &objp->blocks))
return FALSE;
if (!xdr_u_int (xdrs, &objp->fsid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->fileid))
return FALSE;
} else {
IXDR_PUT_U_LONG(buf, objp->mode);
IXDR_PUT_U_LONG(buf, objp->nlink);
IXDR_PUT_U_LONG(buf, objp->uid);
IXDR_PUT_U_LONG(buf, objp->gid);
IXDR_PUT_U_LONG(buf, objp->size);
IXDR_PUT_U_LONG(buf, objp->blocksize);
IXDR_PUT_U_LONG(buf, objp->rdev);
IXDR_PUT_U_LONG(buf, objp->blocks);
IXDR_PUT_U_LONG(buf, objp->fsid);
IXDR_PUT_U_LONG(buf, objp->fileid);
}
if (!xdr_nfstime (xdrs, &objp->atime))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->mtime))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->ctime))
return FALSE;
return TRUE;
} else if (xdrs->x_op == XDR_DECODE) {
if (!xdr_ftype (xdrs, &objp->type))
return FALSE;
buf = XDR_INLINE (xdrs, 10 * BYTES_PER_XDR_UNIT);
if (buf == NULL) {
if (!xdr_u_int (xdrs, &objp->mode))
return FALSE;
if (!xdr_u_int (xdrs, &objp->nlink))
return FALSE;
if (!xdr_u_int (xdrs, &objp->uid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->gid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->size))
return FALSE;
if (!xdr_u_int (xdrs, &objp->blocksize))
return FALSE;
if (!xdr_u_int (xdrs, &objp->rdev))
return FALSE;
if (!xdr_u_int (xdrs, &objp->blocks))
return FALSE;
if (!xdr_u_int (xdrs, &objp->fsid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->fileid))
return FALSE;
} else {
objp->mode = IXDR_GET_U_LONG(buf);
objp->nlink = IXDR_GET_U_LONG(buf);
objp->uid = IXDR_GET_U_LONG(buf);
objp->gid = IXDR_GET_U_LONG(buf);
objp->size = IXDR_GET_U_LONG(buf);
objp->blocksize = IXDR_GET_U_LONG(buf);
objp->rdev = IXDR_GET_U_LONG(buf);
objp->blocks = IXDR_GET_U_LONG(buf);
objp->fsid = IXDR_GET_U_LONG(buf);
objp->fileid = IXDR_GET_U_LONG(buf);
}
if (!xdr_nfstime (xdrs, &objp->atime))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->mtime))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->ctime))
return FALSE;
return TRUE;
}
if (!xdr_ftype (xdrs, &objp->type))
return FALSE;
if (!xdr_u_int (xdrs, &objp->mode))
return FALSE;
if (!xdr_u_int (xdrs, &objp->nlink))
return FALSE;
if (!xdr_u_int (xdrs, &objp->uid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->gid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->size))
return FALSE;
if (!xdr_u_int (xdrs, &objp->blocksize))
return FALSE;
if (!xdr_u_int (xdrs, &objp->rdev))
return FALSE;
if (!xdr_u_int (xdrs, &objp->blocks))
return FALSE;
if (!xdr_u_int (xdrs, &objp->fsid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->fileid))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->atime))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->mtime))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->ctime))
return FALSE;
return TRUE;
}
bool_t
xdr_sattr (XDR *xdrs, sattr *objp)
{
register int32_t *buf;
if (xdrs->x_op == XDR_ENCODE) {
buf = XDR_INLINE (xdrs, 4 * BYTES_PER_XDR_UNIT);
if (buf == NULL) {
if (!xdr_u_int (xdrs, &objp->mode))
return FALSE;
if (!xdr_u_int (xdrs, &objp->uid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->gid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->size))
return FALSE;
} else {
IXDR_PUT_U_LONG(buf, objp->mode);
IXDR_PUT_U_LONG(buf, objp->uid);
IXDR_PUT_U_LONG(buf, objp->gid);
IXDR_PUT_U_LONG(buf, objp->size);
}
if (!xdr_nfstime (xdrs, &objp->atime))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->mtime))
return FALSE;
return TRUE;
} else if (xdrs->x_op == XDR_DECODE) {
buf = XDR_INLINE (xdrs, 4 * BYTES_PER_XDR_UNIT);
if (buf == NULL) {
if (!xdr_u_int (xdrs, &objp->mode))
return FALSE;
if (!xdr_u_int (xdrs, &objp->uid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->gid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->size))
return FALSE;
} else {
objp->mode = IXDR_GET_U_LONG(buf);
objp->uid = IXDR_GET_U_LONG(buf);
objp->gid = IXDR_GET_U_LONG(buf);
objp->size = IXDR_GET_U_LONG(buf);
}
if (!xdr_nfstime (xdrs, &objp->atime))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->mtime))
return FALSE;
return TRUE;
}
if (!xdr_u_int (xdrs, &objp->mode))
return FALSE;
if (!xdr_u_int (xdrs, &objp->uid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->gid))
return FALSE;
if (!xdr_u_int (xdrs, &objp->size))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->atime))
return FALSE;
if (!xdr_nfstime (xdrs, &objp->mtime))
return FALSE;
return TRUE;
}
bool_t
xdr_filename (XDR *xdrs, filename *objp)
{
if (!xdr_string (xdrs, objp, NFS_MAXNAMLEN))
return FALSE;
return TRUE;
}
bool_t
xdr_nfspath (XDR *xdrs, nfspath *objp)
{
if (!xdr_string (xdrs, objp, NFS_MAXPATHLEN))
return FALSE;
return TRUE;
}
bool_t
xdr_attrstat (XDR *xdrs, attrstat *objp)
{
if (!xdr_nfsstat (xdrs, &objp->status))
return FALSE;
switch (objp->status) {
case NFS_OK:
if (!xdr_fattr (xdrs, &objp->attrstat_u.attributes))
return FALSE;
break;
default:
break;
}
return TRUE;
}
bool_t
xdr_sattrargs (XDR *xdrs, sattrargs *objp)
{
if (!xdr_nfs_fh (xdrs, &objp->file))
return FALSE;
if (!xdr_sattr (xdrs, &objp->attributes))
return FALSE;
return TRUE;
}
bool_t
xdr_diropargs (XDR *xdrs, diropargs *objp)
{
if (!xdr_nfs_fh (xdrs, &objp->dir))
return FALSE;
if (!xdr_filename (xdrs, &objp->name))
return FALSE;
return TRUE;
}
bool_t
xdr_diropokres (XDR *xdrs, diropokres *objp)
{
if (!xdr_nfs_fh (xdrs, &objp->file))
return FALSE;
if (!xdr_fattr (xdrs, &objp->attributes))
return FALSE;
return TRUE;
}
bool_t
xdr_diropres (XDR *xdrs, diropres *objp)
{
if (!xdr_nfsstat (xdrs, &objp->status))
return FALSE;
switch (objp->status) {
case NFS_OK:
if (!xdr_diropokres (xdrs, &objp->diropres_u.diropres))
return FALSE;
break;
default:
break;
}
return TRUE;
}
bool_t
xdr_readlinkres (XDR *xdrs, readlinkres *objp)
{
if (!xdr_nfsstat (xdrs, &objp->status))
return FALSE;
switch (objp->status) {
case NFS_OK:
if (!xdr_nfspath (xdrs, &objp->readlinkres_u.data))
return FALSE;
break;
default:
break;
}
return TRUE;
}
bool_t
xdr_readargs (XDR *xdrs, readargs *objp)
{
if (!xdr_nfs_fh (xdrs, &objp->file))
return FALSE;
if (!xdr_u_int (xdrs, &objp->offset))
return FALSE;
if (!xdr_u_int (xdrs, &objp->count))
return FALSE;
if (!xdr_u_int (xdrs, &objp->totalcount))
return FALSE;
return TRUE;
}
bool_t
xdr_readokres (XDR *xdrs, readokres *objp)
{
if (!xdr_fattr (xdrs, &objp->attributes))
return FALSE;
if (!xdr_bytes (xdrs, (char **)&objp->data.data_val, (u_int *) &objp->data.data_len, NFS_MAXDATA))
return FALSE;
return TRUE;
}
bool_t
xdr_readres (XDR *xdrs, readres *objp)
{
if (!xdr_nfsstat (xdrs, &objp->status))
return FALSE;
switch (objp->status) {
case NFS_OK:
if (!xdr_readokres (xdrs, &objp->readres_u.reply))
return FALSE;
break;
default:
break;
}
return TRUE;
}
bool_t
xdr_writeargs (XDR *xdrs, writeargs *objp)
{
register int32_t *buf;
if (xdrs->x_op == XDR_ENCODE) {
if (!xdr_nfs_fh (xdrs, &objp->file))
return FALSE;
buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
if (buf == NULL) {
if (!xdr_u_int (xdrs, &objp->beginoffset))
return FALSE;
if (!xdr_u_int (xdrs, &objp->offset))
return FALSE;
if (!xdr_u_int (xdrs, &objp->totalcount))
return FALSE;
} else {
IXDR_PUT_U_LONG(buf, objp->beginoffset);
IXDR_PUT_U_LONG(buf, objp->offset);
IXDR_PUT_U_LONG(buf, objp->totalcount);
}
if (!xdr_bytes (xdrs, (char **)&objp->data.data_val, (u_int *) &objp->data.data_len, NFS_MAXDATA))
return FALSE;
return TRUE;
} else if (xdrs->x_op == XDR_DECODE) {
if (!xdr_nfs_fh (xdrs, &objp->file))
return FALSE;
buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
if (buf == NULL) {
if (!xdr_u_int (xdrs, &objp->beginoffset))
return FALSE;
if (!xdr_u_int (xdrs, &objp->offset))
return FALSE;
if (!xdr_u_int (xdrs, &objp->totalcount))
return FALSE;
} else {
objp->beginoffset = IXDR_GET_U_LONG(buf);
objp->offset = IXDR_GET_U_LONG(buf);
objp->totalcount = IXDR_GET_U_LONG(buf);
}
if (!xdr_bytes (xdrs, (char **)&objp->data.data_val, (u_int *) &objp->data.data_len, NFS_MAXDATA))
return FALSE;
return TRUE;
}
if (!xdr_nfs_fh (xdrs, &objp->file))
return FALSE;
if (!xdr_u_int (xdrs, &objp->beginoffset))
return FALSE;
if (!xdr_u_int (xdrs, &objp->offset))
return FALSE;
if (!xdr_u_int (xdrs, &objp->totalcount))
return FALSE;
if (!xdr_bytes (xdrs, (char **)&objp->data.data_val, (u_int *) &objp->data.data_len, NFS_MAXDATA))
return FALSE;
return TRUE;
}
bool_t
xdr_createargs (XDR *xdrs, createargs *objp)
{
if (!xdr_diropargs (xdrs, &objp->where))
return FALSE;
if (!xdr_sattr (xdrs, &objp->attributes))
return FALSE;
return TRUE;
}
bool_t
xdr_renameargs (XDR *xdrs, renameargs *objp)
{
if (!xdr_diropargs (xdrs, &objp->from))
return FALSE;
if (!xdr_diropargs (xdrs, &objp->to))
return FALSE;
return TRUE;
}
bool_t
xdr_linkargs (XDR *xdrs, linkargs *objp)
{
if (!xdr_nfs_fh (xdrs, &objp->from))
return FALSE;
if (!xdr_diropargs (xdrs, &objp->to))
return FALSE;
return TRUE;
}
bool_t
xdr_symlinkargs (XDR *xdrs, symlinkargs *objp)
{
if (!xdr_diropargs (xdrs, &objp->from))
return FALSE;
if (!xdr_nfspath (xdrs, &objp->to))
return FALSE;
if (!xdr_sattr (xdrs, &objp->attributes))
return FALSE;
return TRUE;
}
bool_t
xdr_nfscookie (XDR *xdrs, nfscookie *objp)
{
if (!xdr_opaque (xdrs, objp->data, NFS_COOKIESIZE))
return FALSE;
return TRUE;
}
bool_t
xdr_readdirargs (XDR *xdrs, readdirargs *objp)
{
if (!xdr_nfs_fh (xdrs, &objp->dir))
return FALSE;
if (!xdr_nfscookie (xdrs, &objp->cookie))
return FALSE;
if (!xdr_u_int (xdrs, &objp->count))
return FALSE;
return TRUE;
}
bool_t
xdr_entry (XDR *xdrs, entry *objp)
{
if (!xdr_u_int (xdrs, &objp->fileid))
return FALSE;
if (!xdr_filename (xdrs, &objp->name))
return FALSE;
if (!xdr_nfscookie (xdrs, &objp->cookie))
return FALSE;
if (!xdr_pointer (xdrs, (char **)&objp->nextentry, sizeof (entry), (xdrproc_t) xdr_entry))
return FALSE;
return TRUE;
}
bool_t
xdr_dirlist (XDR *xdrs, dirlist *objp)
{
if (!xdr_pointer (xdrs, (char **)&objp->entries, sizeof (entry), (xdrproc_t) xdr_entry))
return FALSE;
if (!xdr_bool (xdrs, &objp->eof))
return FALSE;
return TRUE;
}
bool_t
xdr_readdirres (XDR *xdrs, readdirres *objp)
{
if (!xdr_nfsstat (xdrs, &objp->status))
return FALSE;
switch (objp->status) {
case NFS_OK:
if (!xdr_dirlist (xdrs, &objp->readdirres_u.reply))
return FALSE;
break;
default:
break;
}
return TRUE;
}
bool_t
xdr_statfsokres (XDR *xdrs, statfsokres *objp)
{
register int32_t *buf;
if (xdrs->x_op == XDR_ENCODE) {
buf = XDR_INLINE (xdrs, 5 * BYTES_PER_XDR_UNIT);
if (buf == NULL) {
if (!xdr_u_int (xdrs, &objp->tsize))
return FALSE;
if (!xdr_u_int (xdrs, &objp->bsize))
return FALSE;
if (!xdr_u_int (xdrs, &objp->blocks))
return FALSE;
if (!xdr_u_int (xdrs, &objp->bfree))
return FALSE;
if (!xdr_u_int (xdrs, &objp->bavail))
return FALSE;
} else {
IXDR_PUT_U_LONG(buf, objp->tsize);
IXDR_PUT_U_LONG(buf, objp->bsize);
IXDR_PUT_U_LONG(buf, objp->blocks);
IXDR_PUT_U_LONG(buf, objp->bfree);
IXDR_PUT_U_LONG(buf, objp->bavail);
}
return TRUE;
} else if (xdrs->x_op == XDR_DECODE) {
buf = XDR_INLINE (xdrs, 5 * BYTES_PER_XDR_UNIT);
if (buf == NULL) {
if (!xdr_u_int (xdrs, &objp->tsize))
return FALSE;
if (!xdr_u_int (xdrs, &objp->bsize))
return FALSE;
if (!xdr_u_int (xdrs, &objp->blocks))
return FALSE;
if (!xdr_u_int (xdrs, &objp->bfree))
return FALSE;
if (!xdr_u_int (xdrs, &objp->bavail))
return FALSE;
} else {
objp->tsize = IXDR_GET_U_LONG(buf);
objp->bsize = IXDR_GET_U_LONG(buf);
objp->blocks = IXDR_GET_U_LONG(buf);
objp->bfree = IXDR_GET_U_LONG(buf);
objp->bavail = IXDR_GET_U_LONG(buf);
}
return TRUE;
}
if (!xdr_u_int (xdrs, &objp->tsize))
return FALSE;
if (!xdr_u_int (xdrs, &objp->bsize))
return FALSE;
if (!xdr_u_int (xdrs, &objp->blocks))
return FALSE;
if (!xdr_u_int (xdrs, &objp->bfree))
return FALSE;
if (!xdr_u_int (xdrs, &objp->bavail))
return FALSE;
return TRUE;
}
bool_t
xdr_statfsres (XDR *xdrs, statfsres *objp)
{
if (!xdr_nfsstat (xdrs, &objp->status))
return FALSE;
switch (objp->status) {
case NFS_OK:
if (!xdr_statfsokres (xdrs, &objp->statfsres_u.reply))
return FALSE;
break;
default:
break;
}
return TRUE;
}


@ -0,0 +1,14 @@
struct mbuf;
struct sockaddr;
/* Zero-copy variant of sendto(): 'buf' is handed to the network stack by
 * reference instead of being copied; 'freeproc'/'refproc' are callbacks the
 * stack invokes to release/duplicate the caller's buffer.
 */
ssize_t sendto_nocpy(int s, const void *buf, size_t buflen, int flags,
    const struct sockaddr *toaddr, int tolen, void *closure,
    void (*freeproc)(caddr_t, u_int), void (*refproc)(caddr_t, u_int));
/* Receive a datagram directly into an mbuf chain (returned in *ppm). */
ssize_t recv_mbuf_from(int s, struct mbuf **ppm, long len,
    struct sockaddr *fromaddr, int *fromlen);
struct __rpc_xdr;
enum xdr_op;
/* Create an XDR stream that operates directly on an mbuf chain. */
void xdrmbuf_create(struct __rpc_xdr *, struct mbuf *, enum xdr_op);

rtemsbsd/nfsclient/rpcio.c Normal file (1814 lines)

File diff suppressed because it is too large.

rtemsbsd/nfsclient/rpcio.h Normal file (226 lines)

@ -0,0 +1,226 @@
/**
* @file
*
* @brief A Multithreaded RPC/UDP Multiplexor
*
* @ingroup rtems-nfsclient
*/
/*
* Author: Till Straumann, <strauman@slac.stanford.edu>, 2002
*
* Authorship
* ----------
* This software (NFS-2 client implementation for RTEMS) was created by
* Till Straumann <strauman@slac.stanford.edu>, 2002-2007,
* Stanford Linear Accelerator Center, Stanford University.
*
* Acknowledgement of sponsorship
* ------------------------------
* The NFS-2 client implementation for RTEMS was produced by
* the Stanford Linear Accelerator Center, Stanford University,
* under Contract DE-AC03-76SFO0515 with the Department of Energy.
*
* Government disclaimer of liability
* ----------------------------------
* Neither the United States nor the United States Department of Energy,
* nor any of their employees, makes any warranty, express or implied, or
* assumes any legal liability or responsibility for the accuracy,
* completeness, or usefulness of any data, apparatus, product, or process
* disclosed, or represents that its use would not infringe privately owned
* rights.
*
* Stanford disclaimer of liability
* --------------------------------
* Stanford University makes no representations or warranties, express or
* implied, nor assumes any liability for the use of this software.
*
* Stanford disclaimer of copyright
* --------------------------------
* Stanford University, owner of the copyright, hereby disclaims its
* copyright and all other rights in this software. Hence, anyone may
* freely use it for any purpose without restriction.
*
* Maintenance of notices
* ----------------------
* In the interest of clarity regarding the origin and status of this
* SLAC software, this and all the preceding Stanford University notices
* are to remain affixed to any copy or derivative of this software made
* or distributed by the recipient and are to be affixed to any copy of
* software made or distributed by the recipient that contains a copy or
* derivative of this software.
*
* ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
*/
#ifndef RPCIO_H
#define RPCIO_H
/**
* @defgroup rtems-nfsclient RPC/UDP Multiplexor
*
* @ingroup nfsclient
* @{
*/
#include <rpc/rpc.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include <stdarg.h>
#include "librtemsNfs.h"
typedef struct RpcUdpServerRec_ *RpcUdpServer;
typedef struct RpcUdpXactRec_ *RpcUdpXact;
typedef RpcUdpXact RpcUdpClnt;
#define RPCIOD_DEFAULT_ID 0xdef10000
enum clnt_stat
rpcUdpServerCreate(
struct sockaddr_in *paddr,
rpcprog_t prog,
rpcvers_t vers,
u_long uid, /* RPCIOD_DEFAULT_ID picks the default */
u_long gid, /* RPCIOD_DEFAULT_ID picks the default */
RpcUdpServer *pclnt /* new server is returned here */
);
void
rpcUdpServerDestroy(RpcUdpServer s);
/**
 * @brief Dump statistics to a file (stdout if @a f is NULL).
 * @retval 0 for convenience
 */
int
rpcUdpStats(FILE *f);
enum clnt_stat
rpcUdpClntCreate(
struct sockaddr_in *psaddr,
rpcprog_t prog,
rpcvers_t vers,
u_long uid, /* RPCIOD_DEFAULT_ID picks the default */
u_long gid, /* RPCIOD_DEFAULT_ID picks the default */
RpcUdpClnt *pclnt /* new client is returned here */
);
void
RpcUdpClntDestroy(RpcUdpClnt clnt);
/**
 * @brief Generic pointer typedefs used to mute compiler warnings about
 * mismatching xdrproc_t / caddr_t prototypes.
 */
typedef void *XdrProcT;
typedef void *CaddrT;
enum clnt_stat
rpcUdpClntCall(
RpcUdpClnt clnt,
u_long proc,
XdrProcT xargs,
CaddrT pargs,
XdrProcT xres,
CaddrT pres,
struct timeval *timeout /* optional timeout; may be NULL to pick the default */
);
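/**
 * @par Example
 * A minimal sketch of the client interface (illustrative only: the server
 * address is made up, the NFS v2 program/version numbers 100003/2 are used
 * merely as an example, and error handling is abridged):
 * @code
 * struct sockaddr_in srvaddr;
 * RpcUdpClnt         clnt;
 * enum clnt_stat     st;
 *
 * memset(&srvaddr, 0, sizeof(srvaddr));
 * srvaddr.sin_family      = AF_INET;
 * srvaddr.sin_port        = htons(2049);             // NFS server port
 * srvaddr.sin_addr.s_addr = inet_addr("192.168.1.10");
 *
 * st = rpcUdpClntCreate(&srvaddr, 100003, 2,
 *                       RPCIOD_DEFAULT_ID, RPCIOD_DEFAULT_ID,
 *                       &clnt);
 * if (st == RPC_SUCCESS) {
 *     // procedure 0 is the RPC "null" procedure -- a simple ping
 *     st = rpcUdpClntCall(clnt, 0,
 *                         (XdrProcT)xdr_void, NULL,  // arguments
 *                         (XdrProcT)xdr_void, NULL,  // results
 *                         NULL);                     // NULL -> default timeout
 *     RpcUdpClntDestroy(clnt);
 * }
 * @endcode
 */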
RpcUdpXact
rpcUdpXactCreate(
u_long program,
u_long version,
u_long size
);
void
rpcUdpXactDestroy(
RpcUdpXact xact
);
/**
 * @brief Send a transaction.
 */
enum clnt_stat
rpcUdpSend(
RpcUdpXact xact,
RpcUdpServer srvr,
struct timeval *timeout, /* may be NULL to pick the default */
u_long proc,
xdrproc_t xres,
caddr_t pres,
xdrproc_t xargs,
caddr_t pargs,
... /* 0-terminated list of additional (xdrproc_t, pointer) argument pairs */
);
/**
* @brief Wait for a transaction to complete.
*/
enum clnt_stat
rpcUdpRcv(RpcUdpXact xact);
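/**
 * @par Example
 * A sketch of the split-phase interface (illustrative only; @c srvaddr is a
 * <tt>struct sockaddr_in</tt> filled in as in the example above, and the
 * transaction buffer size of 8192 bytes is an arbitrary assumption):
 * @code
 * RpcUdpServer   srv;
 * RpcUdpXact     xact;
 * enum clnt_stat st;
 *
 * st   = rpcUdpServerCreate(&srvaddr, 100003, 2,
 *                           RPCIOD_DEFAULT_ID, RPCIOD_DEFAULT_ID, &srv);
 * xact = rpcUdpXactCreate(100003, 2, 8192);
 *
 * // hand the request to the rpciod daemon ...
 * st = rpcUdpSend(xact, srv, NULL,                 // NULL -> default timeout
 *                 0,                               // RPC "null" procedure
 *                 (xdrproc_t)xdr_void, NULL,       // results
 *                 (xdrproc_t)xdr_void, NULL,       // arguments
 *                 (xdrproc_t)0);                   // terminates the optional list
 * // ... and block until the reply arrives or the call times out
 * if (st == RPC_SUCCESS)
 *     st = rpcUdpRcv(xact);
 *
 * rpcUdpXactDestroy(xact);
 * rpcUdpServerDestroy(srv);
 * @endcode
 */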
/* An even simpler, one-shot interface. */
enum clnt_stat
rpcUdpCallRp(
struct sockaddr_in *pserver_addr,
u_long prog,
u_long vers,
u_long proc,
XdrProcT xargs,
CaddrT pargs,
XdrProcT xres,
CaddrT pres,
u_long uid, /* RPCIOD_DEFAULT_ID picks the default */
u_long gid, /* RPCIOD_DEFAULT_ID picks the default */
struct timeval *timeout /* NULL picks default */
);
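/**
 * @par Example
 * The same "null procedure" ping as above, using the one-shot interface
 * (again an illustrative sketch only, with @c srvaddr set up as before):
 * @code
 * enum clnt_stat st;
 *
 * st = rpcUdpCallRp(&srvaddr, 100003, 2,
 *                   0,                             // RPC "null" procedure
 *                   (XdrProcT)xdr_void, NULL,      // arguments
 *                   (XdrProcT)xdr_void, NULL,      // results
 *                   RPCIOD_DEFAULT_ID, RPCIOD_DEFAULT_ID,
 *                   NULL);                         // NULL -> default timeout
 * @endcode
 */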
/**
 * @brief Manage pools of transactions.
 *
 * The idea is not to malloc()/free() transactions all the time but to keep
 * a limited number around in a 'pool'. Users who need a XACT may get one
 * from the pool and put it back when done.
 * The pool is implemented with RTEMS message queues, which provide the
 * required task synchronization.
 * A requestor has different options if the pool is empty:
 * - it can wait (block) for a XACT to become available,
 * - it can get an error status back,
 * - or it can malloc() an extra XACT from the heap, which is eventually
 *   released when it is put back into the pool.
 * A usage sketch follows the pool declarations below.
 */
typedef struct RpcUdpXactPoolRec_ *RpcUdpXactPool;
/* NOTE: the pool is initially empty; transactions must first be obtained
 * in XactGetCreate mode.
 */
RpcUdpXactPool
rpcUdpXactPoolCreate(
rpcprog_t prog, rpcvers_t version,
int xactsize, int poolsize);
void
rpcUdpXactPoolDestroy(RpcUdpXactPool pool);
typedef enum {
XactGetFail, /* call fails if no transaction available */
XactGetWait, /* call blocks until transaction available */
XactGetCreate /* a new transaction is allocated (and freed when put back to the pool) */
} XactPoolGetMode;
RpcUdpXact
rpcUdpXactPoolGet(RpcUdpXactPool pool, XactPoolGetMode mode);
void
rpcUdpXactPoolPut(RpcUdpXact xact);
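/**
 * @par Example
 * A sketch of the pool interface (illustrative only; the transaction size
 * of 8192 bytes and the pool depth of 10 are arbitrary assumptions):
 * @code
 * RpcUdpXactPool pool;
 * RpcUdpXact     xact;
 *
 * pool = rpcUdpXactPoolCreate(100003, 2, 8192, 10);
 *
 * // XactGetCreate: fall back to allocating a fresh transaction
 * // if the pool is currently empty
 * xact = rpcUdpXactPoolGet(pool, XactGetCreate);
 * if (xact != NULL) {
 *     // ... use the transaction with rpcUdpSend() / rpcUdpRcv() ...
 *     rpcUdpXactPoolPut(xact);  // hand it back to its pool
 * }
 *
 * rpcUdpXactPoolDestroy(pool);
 * @endcode
 */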
/** @} */
#endif