/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 * 
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 * 
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 * 
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * 
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * This file includes modifications of code found in the Apple XNU Kernel
 * and has been modified for use in this Kext by Rick Macklem, May 2006.
 */
/*
 * Functions that need to be different for different versions of BSD
 * kernel should be kept here, along with any global storage specific
 * to this BSD variant.
 */
#ifndef APPLEKEXT
#include <newnfs/nfs/nfsport.h>
#include <bsm/audit_kernel.h>
extern struct mount nfsv4root_mnt;
extern int nfsrv_debug, nfstest_dropreplies;
extern int nfscl_ticks;
#ifdef NFSD
extern struct newnfs_sockhead newnfs_sockhead;
#endif
extern int newnfs_sockhead_flag;
extern struct newnfs_sockhead newnfscb_sockhead;
extern int newnfscb_sockhead_flag;
extern struct newnfsd_head newnfsd_head;
extern struct newnfsd_head newnfscbd_head;
#ifdef NFSD
extern struct nfssvc_sock *newnfs_udpsock;
#endif
extern struct nfssvc_sock *newnfs_cbudpsock;
extern int newnfsd_head_flag, newnfscbd_head_flag;
extern struct nfsstats newnfsstats;
extern int newnfsrtton;
extern struct nfsreqhead nfsd_reqq;
extern struct nfsrtt newnfsrtt;
extern u_int32_t newrpc_reply, newrpc_msgdenied, newrpc_mismatch, newrpc_autherr, newnfs_xidwrap;
extern time_t nclbuffreeuptimestamp;
extern int nfscl_enablecallb;
extern short nfsv4_cbport;
/*
 * Locks for the various subsystems of this port.  The lck_grp/lck_attr
 * objects used to allocate them are the static nfs_*_lck_* pointers
 * declared further below; allocation presumably happens during module
 * initialization (not visible in this chunk).
 */
lck_mtx_t *nfs_gss_mutex;
lck_mtx_t *nfs_gssclhandle_mutex;
lck_spin_t *nfs_nameid_slock;
lck_mtx_t *newnfsd_mtx;			/* guards nfsd/nfscbd wakeups (see dorecs paths) */
lck_spin_t *nfs_state_slock;
lck_mtx_t *nfs_sockl_mutex;
lck_mtx_t *nfs_req_slock;		/* NB: a mutex despite the "slock" name */
lck_rw_t newnfs_export_rwlock;
struct newnfs_sockhead newnfs_deadsockhead;
struct newnfs_sockhead newnfscb_deadsockhead;
/* Counts of running server-side and callback daemons; checked in the upcalls. */
int newnfs_numnfsd = 0, nfs_numnfscbd = 0;
/* Statistics maintained by newnfs_realign() below. */
int nfs_realign_count, nfs_realign_test;
int nfsrv_testmalloclimit = 0;

/*
 * variables for managing the nfs_bind_resv_thread
 */
extern int ncl_resv_mounts;
lck_grp_t *nfs_bind_resv_lck_grp;
lck_grp_attr_t *nfs_bind_resv_lck_grp_attr;
lck_attr_t *nfs_bind_resv_lck_attr;
lck_mtx_t *ncl_bind_resv_mutex;		/* guards nfs_bind_resv_request_queue */

/* XXX */
boolean_t	current_thread_aborted(void);
kern_return_t	thread_terminate(thread_t);
#endif	/* !APPLEKEXT */
/* States for nfs_bind_resv_thread_state: 0 = never started. */
#define NFS_BIND_RESV_THREAD_STATE_INITTED	1
#define NFS_BIND_RESV_THREAD_STATE_RUNNING	2
static int nfs_bind_resv_thread_state = 0;
/*
 * One reserved-port bind request.  Lives on the requesting thread's stack
 * (see nfs_bind_resv_nopriv()) while queued for nfs_bind_resv_thread.
 */
struct nfs_bind_resv_request {
	TAILQ_ENTRY(nfs_bind_resv_request) brr_chain;	/* queue linkage */
	struct nfssockreq *brr_nrp;	/* socket request to bind */
	int brr_error;			/* result from nfs_bind_resv() */
};
static TAILQ_HEAD(, nfs_bind_resv_request) nfs_bind_resv_request_queue;

/*
 * For darwin, define this with the first argument being an sotype.
 */
#undef NFSIGNORE_SOERROR
/*
 * True ("ignore this socket error") only for non-stream sockets when the
 * error is not one of EINTR/ERESTART/EWOULDBLOCK/EIO.  Stream (TCP) socket
 * errors are never ignored.
 */
#define	NFSIGNORE_SOERROR(s, e) \
		((e) != EINTR && (e) != ERESTART && (e) != EWOULDBLOCK && \
		 (e) != EIO && ((s)) != SOCK_STREAM)


/*
 * Lock group/attribute objects backing the global locks declared above.
 * Presumably allocated once at initialization time — the init code is not
 * visible in this chunk.
 */
static lck_grp_attr_t *nfs_gss_lck_grp_attr;
static lck_grp_t *nfs_gss_lck_grp;
static lck_attr_t *nfs_gss_lck_attr;
static lck_grp_attr_t *nfs_gssclhandle_lck_grp_attr;
static lck_grp_t *nfs_gssclhandle_lck_grp;
static lck_attr_t *nfs_gssclhandle_lck_attr;
static lck_grp_attr_t *nfs_nameid_lck_grp_attr;
static lck_grp_t *nfs_nameid_lck_grp;
static lck_attr_t *nfs_nameid_lck_attr;
static lck_grp_attr_t *newnfsd_lck_grp_attr;
static lck_grp_t *newnfsd_lck_grp;
static lck_attr_t *newnfsd_lck_attr;
static lck_grp_attr_t *nfs_state_lck_grp_attr;
static lck_grp_t *nfs_state_lck_grp;
static lck_attr_t *nfs_state_lck_attr;
static lck_grp_attr_t *nfs_sockl_lck_grp_attr;
static lck_grp_t *nfs_sockl_lck_grp;
static lck_attr_t *nfs_sockl_lck_attr;
static lck_grp_attr_t *nfs_req_lck_grp_attr;
static lck_grp_t *nfs_req_lck_grp;
static lck_attr_t *nfs_req_lck_attr;
static lck_grp_attr_t *nfs_export_group_attr;
static lck_attr_t *nfs_export_lock_attr;
static lck_grp_t *nfs_export_rwlock_group;
static lck_grp_attr_t *nfs_slp_group_attr;
static lck_attr_t *nfs_slp_lock_attr;
static lck_grp_t *nfs_slp_rwlock_group;
/* Daemon sleep counters (named for sleeping daemons; waiters not visible here). */
#ifdef NFSD
static int nfsd_waiting = 0;
#endif
static int nfscbd_waiting = 0;
/*
 * Cached mbuf size constants — presumably filled in by newncl_mbuf_init()
 * (declared below; its definition is not in this chunk).
 */
static int ncl_mbuf_mlen = 0, ncl_mbuf_mhlen, ncl_mbuf_minclsize;
static int ncl_mbuf_mclbytes;
/* Timer backoff table: successive powers of two. */
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
/* Indexed by RPC procedure number; NFS_SRTT/NFS_SDRTT below use (proct[x] - 1). */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that timer est. would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write     - A+4D
 * other           - nm_timeo
 */
/*
 * Retransmit timeout for timer class t on mount n; class 0 uses the
 * mount's static timeout, classes 1-2 use A+2D and classes 3-4 use A+4D
 * (see the comment above).
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
/* Smoothed rtt and rtt-deviation slots for request r (timer classes 1..4). */
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]

#ifdef NFSD
static void nfsrvd_rcv(struct socket *, caddr_t, int);
static void nfsrvd_rcv_locked(socket_t, struct nfssvc_sock *, int);
#endif
void nfsrvd_cbrcv(struct socket *, caddr_t, int);
static void nfsrvd_cbrcv_locked(socket_t, struct nfssvc_sock *, int);
static int nfs_rcvlock(struct nfsreq *);
static void nfs_rcvunlock(struct nfsreq *);
static int nfs_receive(struct nfsreq *, mbuf_t *, struct ucred *, proc_t);
static int nfs_reconnect(struct nfsreq *rep, struct ucred *);
static void nfs_repdequeue(struct nfsreq *rep);
static void nfs_down(struct nfsmount *, proc_t, int, int, const char *);
static void nfs_up(struct nfsmount *, proc_t, int, const char *);
static int nfsrv_msg(proc_t, char *, const char *, int);
static void nfsrv_slpderef(struct nfssvc_sock *, int);
static void newncl_mbuf_init(void);
static int newnfs_addsock(socket_t, mbuf_t);
static int newnfs_cbaddsock(socket_t, mbuf_t);
static void nfsrvd_wakenfscbd(struct nfssvc_sock *);

static void
nfs_down(struct nfsmount *nmp, proc_t proc, int error, int flags, const char *msg)
{
	/*
	 * Mark the mount as not responding.  For each condition named in
	 * flags (RPC timeout, lock daemon timeout) that is not already
	 * recorded in nm_state, post the corresponding vfs event once and
	 * set the state bit so the event is not repeated.  Always log msg.
	 */
	if (nmp == NULL)
		return;
	if ((flags & NFSSTA_TIMEO) != 0 && (nmp->nm_state & NFSSTA_TIMEO) == 0) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid,
		    VQ_NOTRESP, 0);
		nmp->nm_state |= NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) != 0 &&
	    (nmp->nm_state & NFSSTA_LOCKTIMEO) == 0) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid,
		    VQ_NOTRESPLOCK, 0);
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
	}
	nfsrv_msg(proc, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
}

static void
nfs_up(struct nfsmount *nmp, proc_t proc, int flags, const char *msg)
{
	/*
	 * Inverse of nfs_down(): log the optional message, then for each
	 * condition in flags that is currently recorded in nm_state, clear
	 * the state bit and post the matching "responding again" vfs event.
	 */
	if (nmp == NULL)
		return;
	if (msg != NULL)
		nfsrv_msg(proc, vfs_statfs(nmp->nm_mountp)->f_mntfromname,
		    msg, 0);
	if ((flags & NFSSTA_TIMEO) != 0 && (nmp->nm_state & NFSSTA_TIMEO) != 0) {
		nmp->nm_state &= ~NFSSTA_TIMEO;
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid,
		    VQ_NOTRESP, 1);
	}
	if ((flags & NFSSTA_LOCKTIMEO) != 0 &&
	    (nmp->nm_state & NFSSTA_LOCKTIMEO) != 0) {
		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid,
		    VQ_NOTRESPLOCK, 1);
	}
}

/*
 * attempt to bind a socket to a reserved port
 */
static int
nfs_bind_resv(struct nfssockreq *nrp)
{
	socket_t so = (socket_t)nrp->nr_so;
	struct sockaddr_in sin;
	int error;
	u_short tport;

	if (!so)
		return (EINVAL);

	/*
	 * Zero the whole sockaddr first: previously only the individual
	 * fields were set, so sin_zero carried uninitialized stack bytes
	 * into sock_bind().
	 */
	bzero(&sin, sizeof (sin));
	sin.sin_len = sizeof (struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = INADDR_ANY;
	tport = IPPORT_RESERVED - 1;
	sin.sin_port = htons(tport);

	/*
	 * Walk downward from IPPORT_RESERVED-1 until a free reserved port
	 * is found or the upper half of the reserved range is exhausted.
	 * Returns 0 on success, else the last sock_bind() error.
	 */
	while (((error = sock_bind(so, (struct sockaddr *) &sin)) == EADDRINUSE) &&
	       (--tport > IPPORT_RESERVED / 2))
		sin.sin_port = htons(tport);
	return (error);
}

/*
 * thread to handle any reserved port bind requests
 */
/*
 * Kernel thread body: services queued reserved-port bind requests.
 * Loops while there are reserved-port mounts, binding each queued request
 * and waking its submitter, then sleeps until ncl_bind_resv_thread_wake()
 * posts a wakeup.  Terminates itself when ncl_resv_mounts drops to 0.
 */
static void
nfs_bind_resv_thread(void)
{
	struct nfs_bind_resv_request *brreq;

	nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_RUNNING;

	/* NOTE(review): ncl_resv_mounts is read without holding any lock —
	 * confirm callers tolerate the race. */
	while (ncl_resv_mounts > 0) {
		lck_mtx_lock(ncl_bind_resv_mutex);
		while ((brreq = TAILQ_FIRST(&nfs_bind_resv_request_queue))) {
			TAILQ_REMOVE(&nfs_bind_resv_request_queue, brreq, brr_chain);
			/* Drop the mutex while binding so submitters can queue. */
			lck_mtx_unlock(ncl_bind_resv_mutex);
			brreq->brr_error = nfs_bind_resv(brreq->brr_nrp);
			wakeup(brreq);	/* wake the tsleep() in nfs_bind_resv_nopriv() */
			lck_mtx_lock(ncl_bind_resv_mutex);
		}
		/* PDROP: msleep() releases the mutex; re-taken at loop top. */
		msleep((caddr_t)&nfs_bind_resv_request_queue,
				ncl_bind_resv_mutex, PSOCK | PDROP,
				"nfs_bind_resv_request_queue", NULL);
	}

	nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_INITTED;
	(void) thread_terminate(current_thread());
}

APPLESTATIC int
ncl_bind_resv_thread_wake(void)
{
	/*
	 * Poke the reserved-port binder thread.  Fails with EIO when the
	 * thread has not reached the RUNNING state, since there would be
	 * nobody listening for the wakeup.
	 */
	if (nfs_bind_resv_thread_state < NFS_BIND_RESV_THREAD_STATE_RUNNING) {
		return (EIO);
	}
	wakeup(&nfs_bind_resv_request_queue);
	return (0);
}

/*
 * underprivileged procs call this to request nfs_bind_resv_thread
 * to perform the reserved port binding for them.
 */
static int
nfs_bind_resv_nopriv(struct nfssockreq *nrp)
{
	struct nfs_bind_resv_request brreq;
	int error;

	/*
	 * Lazily create the binder infrastructure (lock group, mutex and
	 * request queue) on very first use, then spawn the binder thread
	 * whenever it is not currently running.
	 */
	if (nfs_bind_resv_thread_state < NFS_BIND_RESV_THREAD_STATE_RUNNING) {
		if (nfs_bind_resv_thread_state < NFS_BIND_RESV_THREAD_STATE_INITTED) {
			nfs_bind_resv_lck_grp_attr = lck_grp_attr_alloc_init();
			nfs_bind_resv_lck_grp = lck_grp_alloc_init("nfs_bind_resv", nfs_bind_resv_lck_grp_attr);
			nfs_bind_resv_lck_attr = lck_attr_alloc_init();
			ncl_bind_resv_mutex = lck_mtx_alloc_init(nfs_bind_resv_lck_grp, nfs_bind_resv_lck_attr);
			TAILQ_INIT(&nfs_bind_resv_request_queue);
			nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_INITTED;
		}
		kernel_thread(kernel_task, nfs_bind_resv_thread);
		nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_RUNNING;
	}

	/* The request lives on our stack; the binder thread fills brr_error. */
	brreq.brr_nrp = nrp;
	brreq.brr_error = 0;

	lck_mtx_lock(ncl_bind_resv_mutex);
	TAILQ_INSERT_TAIL(&nfs_bind_resv_request_queue, &brreq, brr_chain);
	lck_mtx_unlock(ncl_bind_resv_mutex);

	error = ncl_bind_resv_thread_wake();
	if (error) {
		/*
		 * Undo the enqueue.  The removal must be done while holding
		 * ncl_bind_resv_mutex — the previous code unlinked the entry
		 * unlocked, racing with the binder thread's queue scan.
		 */
		lck_mtx_lock(ncl_bind_resv_mutex);
		TAILQ_REMOVE(&nfs_bind_resv_request_queue, &brreq, brr_chain);
		lck_mtx_unlock(ncl_bind_resv_mutex);
		/* Note: we might be able to simply restart the thread */
		return (error);
	}

	/*
	 * Wait for the binder thread to service brreq and wakeup(&brreq).
	 * NOTE(review): if the thread completes the bind and issues the
	 * wakeup before we reach tsleep(), this sleeps forever — confirm
	 * whether a lost-wakeup guard is needed here.
	 */
	tsleep((caddr_t)&brreq, PSOCK, "nfsbindresv", 0);

	return (brreq.brr_error);
}

#ifdef NFSD
/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with MBUF_WAITOK from an nfsd.
 */
static void
nfsrvd_rcv(socket_t so, caddr_t arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;

	/* Nothing to do when no nfsd is running or the socket slot is dead. */
	if (newnfs_numnfsd == 0 || (slp->ns_flag & SLP_VALID) == 0)
		return;

	lck_rw_lock_exclusive(&slp->ns_rwlock);
	nfsrvd_rcv_locked(so, slp, waitflag);
	/* The callee releases ns_rwlock when waitflag == MBUF_DONTWAIT. */
}
/*
 * Drain incoming data from an nfsd server socket and queue complete RPC
 * request records on slp->ns_rec.  Called with slp->ns_rwlock held
 * exclusively; when waitflag == MBUF_DONTWAIT the lock is dropped before
 * returning (see the dorecs label).
 */
static void
nfsrvd_rcv_locked(socket_t so, struct nfssvc_sock *slp, int waitflag)
{
	mbuf_t m, mp, mhck, m2;
	int ns_flag=0, error;
	struct msghdr	msg;
	size_t bytes_read;

	if ((slp->ns_flag & SLP_VALID) == 0) {
		if (waitflag == MBUF_DONTWAIT)
			lck_rw_done(&slp->ns_rwlock);
		return;
	}

#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == MBUF_DONTWAIT) {
		ns_flag = SLP_NEEDQ;
		goto dorecs;
	}
#endif
	if (slp->ns_sotype == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec && waitflag == MBUF_DONTWAIT) {
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		/* Huge byte count: take everything currently available. */
		bytes_read = 1000000000;
		error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
		if (error || mp == NULL) {
			/* EWOULDBLOCK = try again later; anything else = dead socket. */
			if (error == EWOULDBLOCK)
				ns_flag = SLP_NEEDQ;
			else
				ns_flag = SLP_DISCONN;
			goto dorecs;
		}
		m = mp;
		/* Append the new data to the raw (unparsed) byte-stream chain. */
		if (slp->ns_rawend) {
			if ((error = mbuf_setnext(slp->ns_rawend, m)))
				panic("nfsrv_rcv: mbuf_setnext failed %d\n", error);
			slp->ns_cc += bytes_read;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = bytes_read;
		}
		/* Advance ns_rawend to the new tail of the chain. */
		while ((m2 = mbuf_next(m)))
			m = m2;
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		error = nfsrvd_getstream(slp, waitflag);
		if (error) {
			/* EPERM = bogus record mark; drop the connection. */
			if (error == EPERM)
				ns_flag = SLP_DISCONN;
			else
				ns_flag = SLP_NEEDQ;
		}
	} else {
		/* Datagram socket: each recv is one record. */
		struct sockaddr_storage	nam;
		
		bzero(&msg, sizeof(msg));
		msg.msg_name = (caddr_t)&nam;
		msg.msg_namelen = sizeof(nam);
		
		do {
			bytes_read = 1000000000;
			error = sock_receivembuf(so, &msg, &mp, MSG_DONTWAIT | MSG_NEEDSA, &bytes_read);
			if (mp) {
				/*
				 * Prepend the sender's address as an
				 * MBUF_TYPE_SONAME mbuf so nfsrvd_dorec()
				 * can recover it.
				 */
				if (msg.msg_name && (mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &mhck) == 0)) {
					mbuf_setlen(mhck, nam.ss_len);
					bcopy(&nam, mbuf_data(mhck), nam.ss_len);
					m = mhck;
					if (mbuf_setnext(m, mp)) {
						/* trouble... just drop it */
						printf("nfsrv_rcv: mbuf_setnext failed\n");
						mbuf_free(mhck);
						m = mp;
					}
				} else {
					m = mp;
				}
				/* Queue the datagram as a complete record. */
				if (slp->ns_recend)
					mbuf_setnextpkt(slp->ns_recend, m);
				else
					slp->ns_rec = m;
				slp->ns_recend = m;
				mbuf_setnextpkt(m, NULL);
			}
#if 0
			if (error) {
				/*
				 * This may be needed in the future to support
				 * non-byte-stream connection-oriented protocols
				 * such as SCTP.
				 */
				/*
				 * This (slp->ns_sotype == SOCK_STREAM) should really
				 * be a check for PR_CONNREQUIRED.
				 */
				if ((slp->ns_sotype == SOCK_STREAM)
					&& error != EWOULDBLOCK) {
					ns_flag = SLP_DISCONN;
					goto dorecs;
				}
			}
#endif
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (ns_flag)
		slp->ns_flag |= ns_flag;
	if (waitflag == MBUF_DONTWAIT) {
		/* Drop ns_rwlock, then wake an nfsd if there is work queued. */
		int wake = (slp->ns_rec || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)));
		lck_rw_done(&slp->ns_rwlock);
		if (wake && newnfs_numnfsd) {
			lck_mtx_lock(newnfsd_mtx);
			nfsrvd_wakenfsd(slp);
			lck_mtx_unlock(newnfsd_mtx);
		}
	}
}
#endif	/* NFSD */

/*
 * Socket upcall routine for the nfscbd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with MBUF_WAITOK from an nfsd.
 */
void
nfsrvd_cbrcv(struct socket *so, caddr_t arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;

	/* Nothing to do when no nfscbd is running or the socket slot is dead. */
	if (nfs_numnfscbd == 0 || (slp->ns_flag & SLP_VALID) == 0)
		return;

	lck_rw_lock_exclusive(&slp->ns_rwlock);
	nfsrvd_cbrcv_locked((socket_t)so, slp, waitflag);
	/* The callee releases ns_rwlock when waitflag == MBUF_DONTWAIT. */
}
/*
 * Callback-socket twin of nfsrvd_rcv_locked(): drain incoming data from an
 * nfscbd socket and queue complete RPC records on slp->ns_rec.  Called with
 * slp->ns_rwlock held exclusively; when waitflag == MBUF_DONTWAIT the lock
 * is dropped before returning.
 */
static void
nfsrvd_cbrcv_locked(socket_t so, struct nfssvc_sock *slp, int waitflag)
{
	mbuf_t m, mp, mhck, m2;
	int ns_flag=0, error;
	struct msghdr	msg;
	size_t bytes_read;

	if ((slp->ns_flag & SLP_VALID) == 0) {
		if (waitflag == MBUF_DONTWAIT)
			lck_rw_done(&slp->ns_rwlock);
		return;
	}

#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == MBUF_DONTWAIT) {
		ns_flag = SLP_NEEDQ;
		goto dorecs;
	}
#endif
	if (slp->ns_sotype == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec && waitflag == MBUF_DONTWAIT) {
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		/* Huge byte count: take everything currently available. */
		bytes_read = 1000000000;
		error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
		if (error || mp == NULL) {
			/* EWOULDBLOCK = try again later; anything else = dead socket. */
			if (error == EWOULDBLOCK)
				ns_flag = SLP_NEEDQ;
			else
				ns_flag = SLP_DISCONN;
			goto dorecs;
		}
		m = mp;
		/* Append the new data to the raw (unparsed) byte-stream chain. */
		if (slp->ns_rawend) {
			if ((error = mbuf_setnext(slp->ns_rawend, m)))
				panic("nfsrv_rcv: mbuf_setnext failed %d\n", error);
			slp->ns_cc += bytes_read;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = bytes_read;
		}
		/* Advance ns_rawend to the new tail of the chain. */
		while ((m2 = mbuf_next(m)))
			m = m2;
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		error = nfsrvd_getstream(slp, waitflag);
		if (error) {
			/* EPERM = bogus record mark; drop the connection. */
			if (error == EPERM)
				ns_flag = SLP_DISCONN;
			else
				ns_flag = SLP_NEEDQ;
		}
	} else {
		/* Datagram socket: each recv is one record. */
		struct sockaddr_storage	nam;
		
		bzero(&msg, sizeof(msg));
		msg.msg_name = (caddr_t)&nam;
		msg.msg_namelen = sizeof(nam);
		
		do {
			bytes_read = 1000000000;
			error = sock_receivembuf(so, &msg, &mp, MSG_DONTWAIT | MSG_NEEDSA, &bytes_read);
			if (mp) {
				/*
				 * Prepend the sender's address as an
				 * MBUF_TYPE_SONAME mbuf so nfsrvd_dorec()
				 * can recover it.
				 */
				if (msg.msg_name && (mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &mhck) == 0)) {
					mbuf_setlen(mhck, nam.ss_len);
					bcopy(&nam, mbuf_data(mhck), nam.ss_len);
					m = mhck;
					if (mbuf_setnext(m, mp)) {
						/* trouble... just drop it */
						printf("nfsrv_rcv: mbuf_setnext failed\n");
						mbuf_free(mhck);
						m = mp;
					}
				} else {
					m = mp;
				}
				/* Queue the datagram as a complete record. */
				if (slp->ns_recend)
					mbuf_setnextpkt(slp->ns_recend, m);
				else
					slp->ns_rec = m;
				slp->ns_recend = m;
				mbuf_setnextpkt(m, NULL);
			}
#if 0
			if (error) {
				/*
				 * This may be needed in the future to support
				 * non-byte-stream connection-oriented protocols
				 * such as SCTP.
				 */
				/*
				 * This (slp->ns_sotype == SOCK_STREAM) should really
				 * be a check for PR_CONNREQUIRED.
				 */
				if ((slp->ns_sotype == SOCK_STREAM)
					&& error != EWOULDBLOCK) {
					ns_flag = SLP_DISCONN;
					goto dorecs;
				}
			}
#endif
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (ns_flag)
		slp->ns_flag |= ns_flag;
	if (waitflag == MBUF_DONTWAIT) {
		/* Drop ns_rwlock, then wake an nfscbd if there is work queued. */
		int wake = (slp->ns_rec || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)));
		lck_rw_done(&slp->ns_rwlock);
		if (wake && nfs_numnfscbd) {
			lck_mtx_lock(newnfsd_mtx);
			nfsrvd_wakenfscbd(slp);
			lck_mtx_unlock(newnfsd_mtx);
		}
	}
}

/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 */
/*
 * Parse RPC records out of slp->ns_raw (a TCP byte stream) and queue each
 * complete record on slp->ns_rec.  Each record is preceded by a 4-byte
 * record mark: high bit = last fragment, low 31 bits = fragment length.
 * Returns 0 when out of data, EPERM on a bogus record length, EWOULDBLOCK
 * on mbuf-operation failure.  SLP_GETSTREAM serializes parsers on this slp.
 */
APPLESTATIC int
nfsrvd_getstream(struct nfssvc_sock *slp, int waitflag)
{
	mbuf_t m;
	char *cp1, *cp2, *mdata;
	int len, mlen, error;
	mbuf_t om, m2, recm;
	u_long recmark;

	if (slp->ns_flag & SLP_GETSTREAM)
		panic("nfs getstream");
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
	    /* Phase 1: read the 4-byte record mark, if not already done. */
	    if (slp->ns_reclen == 0) {
		if (slp->ns_cc < NFSX_UNSIGNED) {
			slp->ns_flag &= ~SLP_GETSTREAM;
			return (0);
		}
		m = slp->ns_raw;
		mdata = mbuf_data(m);
		mlen = mbuf_len(m);
		if (mlen >= NFSX_UNSIGNED) {
			/* Mark is contiguous in the first mbuf. */
			bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED);
			mdata += NFSX_UNSIGNED;
			mlen -= NFSX_UNSIGNED;
			mbuf_setdata(m, mdata, mlen);
		} else {
			/* Mark straddles mbufs: gather it byte by byte. */
			cp1 = (caddr_t)&recmark;
			cp2 = mdata;
			while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
				while (mlen == 0) {
					m = mbuf_next(m);
					cp2 = mbuf_data(m);
					mlen = mbuf_len(m);
				}
				*cp1++ = *cp2++;
				mlen--;
				mbuf_setdata(m, cp2, mlen);
			}
		}
		slp->ns_cc -= NFSX_UNSIGNED;
		recmark = ntohl(recmark);
		/* Low 31 bits = fragment length; high bit = last fragment. */
		slp->ns_reclen = recmark & ~0x80000000;
		if (recmark & 0x80000000)
			slp->ns_flag |= SLP_LASTFRAG;
		else
			slp->ns_flag &= ~SLP_LASTFRAG;
		if (slp->ns_reclen < NFS_MINPACKET || slp->ns_reclen > NFS_MAXPACKET) {
			slp->ns_flag &= ~SLP_GETSTREAM;
			return (EPERM);
		}
	    }

	    /*
	     * Now get the record part.
	     *
	     * Note that slp->ns_reclen may be 0.  Linux sometimes
	     * generates 0-length RPCs
	     */
	    recm = NULL;
	    if (slp->ns_cc == slp->ns_reclen) {
		/* Exactly one fragment buffered: take the whole raw chain. */
		recm = slp->ns_raw;
		slp->ns_raw = slp->ns_rawend = NULL;
		slp->ns_cc = slp->ns_reclen = 0;
	    } else if (slp->ns_cc > slp->ns_reclen) {
		/* More than one fragment buffered: split the raw chain. */
		len = 0;
		m = slp->ns_raw;
		mlen = mbuf_len(m);
		mdata = mbuf_data(m);
		om = NULL;
		while (len < slp->ns_reclen) {
			if ((len + mlen) > slp->ns_reclen) {
				/* Fragment ends mid-mbuf: copy the head part. */
				if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) {
					slp->ns_flag &= ~SLP_GETSTREAM;
					return (EWOULDBLOCK);
				}
				if (om) {
					if (mbuf_setnext(om, m2)) {
						/* trouble... just drop it */
						printf("nfsrv_getstream: mbuf_setnext failed\n");
						mbuf_freem(m2);
						slp->ns_flag &= ~SLP_GETSTREAM;
						return (EWOULDBLOCK);
					}
					recm = slp->ns_raw;
				} else {
					recm = m2;
				}
				/* Advance the remaining mbuf past the copied bytes. */
				mdata += slp->ns_reclen - len;
				mlen -= slp->ns_reclen - len;
				mbuf_setdata(m, mdata, mlen);
				len = slp->ns_reclen;
			} else if ((len + mlen) == slp->ns_reclen) {
				/* Fragment ends exactly on an mbuf boundary. */
				om = m;
				len += mlen;
				m = mbuf_next(m);
				recm = slp->ns_raw;
				if (mbuf_setnext(om, NULL)) {
					printf("nfsrv_getstream: mbuf_setnext failed 2\n");
					slp->ns_flag &= ~SLP_GETSTREAM;
					return (EWOULDBLOCK);
				}
				mlen = mbuf_len(m);
				mdata = mbuf_data(m);
			} else {
				/* This whole mbuf belongs to the fragment. */
				om = m;
				len += mlen;
				m = mbuf_next(m);
				mlen = mbuf_len(m);
				mdata = mbuf_data(m);
			}
		}
		slp->ns_raw = m;
		slp->ns_cc -= len;
		slp->ns_reclen = 0;
	    } else {
		/* Not enough data buffered yet for this fragment. */
		slp->ns_flag &= ~SLP_GETSTREAM;
		return (0);
	    }

	    /*
	     * Accumulate the fragments into a record.
	     */
	    if (slp->ns_frag == NULL) {
		slp->ns_frag = recm;
	    } else {
	        m = slp->ns_frag;
		while ((m2 = mbuf_next(m)))
		    m = m2;
		if ((error = mbuf_setnext(m, recm)))
		    panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error);
	    }
	    /* Last fragment: move the assembled record onto the ns_rec queue. */
	    if (slp->ns_flag & SLP_LASTFRAG) {
		if (slp->ns_recend)
		    mbuf_setnextpkt(slp->ns_recend, slp->ns_frag);
		else
		    slp->ns_rec = slp->ns_frag;
		slp->ns_recend = slp->ns_frag;
		slp->ns_frag = NULL;
	    }
	}
}

/*
 * Parse an RPC header.
 */
/*
 * Dequeue the next request record from slp->ns_rec and fill in the request
 * descriptor nd for it, then parse the RPC header via nfsrvd_getreq().
 * A leading MBUF_TYPE_SONAME mbuf (prepended by the receive upcall for
 * datagram sockets) is split off into nd->nd_nam2.  Returns ENOBUFS when
 * there is no record to process, else the nfsrvd_getreq() result.
 */
APPLESTATIC int
nfsrvd_dorec(struct nfssvc_sock *slp, __unused struct nfsd *nfsd,
    struct nfsrv_descript *nd, NFSPROC_T *p, int iscb)
{
	mbuf_t m;
	mbuf_t nam;
	int error;

	if ((slp->ns_flag & SLP_VALID) == 0 || (slp->ns_rec == NULL))
		return (ENOBUFS);
	/* Unlink the first record from the packet queue. */
	m = slp->ns_rec;
	slp->ns_rec = mbuf_nextpkt(m);
	if (slp->ns_rec)
		mbuf_setnextpkt(m, NULL);
	else
		slp->ns_recend = NULL;
	/* Detach the sender-address mbuf, if the upcall prepended one. */
	if (mbuf_type(m) == MBUF_TYPE_SONAME) {
		nam = m;
		m = mbuf_next(m);
		if ((error = mbuf_setnext(nam, NULL)))
			panic("nfsrv_dorec: mbuf_setnext failed %d\n", error);
	} else
		nam = NULL;
	nd->nd_mreq = NULL;
	nd->nd_md = nd->nd_mrep = m;
	/* nd_nam: per-datagram address, or the connection's address for TCP. */
	if (nam)
		nd->nd_nam = nam;
	else
		nd->nd_nam = slp->ns_nam;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mbuf_data(m);
	nd->nd_gssp = NULL;
	nd->nd_sockref = slp->ns_sockref;
	nd->nd_tcpconntime = slp->ns_tcpconntime;
	if ((slp->ns_flag & SLP_SAMETCPCONN))
		nd->nd_flag |= ND_SAMETCPCONN;
	error = nfsrvd_getreq(nd, p, iscb);
	if (error) {
		/* On a parse failure, the address mbuf is ours to free. */
		if (nam)
			mbuf_freem(nam);
		return (error);
	}
	return (0);
}

/*
 * Test for a termination condition pending on the process.
 * This is used to determine if we need to bail on a mount.
 * EIO is returned if there has been a soft timeout.
 * EINTR is returned if there is a signal pending that is not being ignored
 * and the mount is interruptable, or if we are a thread that is in the process
 * of cancellation (also SIGKILL posted).
 */
APPLESTATIC int
newnfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, proc_t p)
{
	sigset_t pending_sigs;
	struct nfsmount *repnmp;

	/* request has timed out on a 'soft' mount. */
	if (rep != NULL) {
		if (rep->r_flags & R_UNMOUNT)
			return (ENXIO);
		if (rep->r_flags & R_SOFTTERM)
			return (EIO);
	}
	/* nmp may legitimately be NULL here (see the block comment below). */
	if (nmp == NULL)
		return (0);
	if (rep != NULL) {
		repnmp = rep->r_nmp;
		/* we've had a forced unmount. */
		/* (R_UNMOUNT was already checked above; harmless recheck.) */
		if (rep->r_flags & R_UNMOUNT)
			return (ENXIO);
		if (repnmp == NULL)
			return (0);
		/* Someone is unmounting us, go soft and mark it. */
		if (repnmp->nm_mountp->mnt_kern_flag & MNTK_FRCUNMOUNT) {
			repnmp->nm_flag |= NFSMNT_SOFT;
			repnmp->nm_state |= NFSSTA_FORCE;
		}
		/*
		 * If we're in the middle of a forced unmount and an RPC
		 * has timed out, we're dead: fail the I/O.
		 */
		if ((repnmp->nm_state & (NFSSTA_FORCE|NFSSTA_TIMEO)) ==
		    (NFSSTA_FORCE|NFSSTA_TIMEO)) {
			return (EIO);
		}
		/*
		 * If the mount is hung and we've requested not to hang
		 * on remote filesystems, then bail now.
		 */
		if (p != NULL && (proc_noremotehang(p)) != 0 &&
		    (repnmp->nm_state & NFSSTA_TIMEO) != 0)
			return (EIO);
	}
	/* XXX: is this valid?  this probably should be an assertion. */
	if (p == NULL)
		return (0);

	/* Threads belonging to the kernel task don't need the abort check. */
	if ((current_proc() != kernproc) && current_thread_aborted()) {
		return (EINTR);
	}
	/* mask off thread and process blocked signals. */

	pending_sigs = proc_pendingsignals(p, NFSINT_SIGMASK);
	if (pending_sigs && (nmp->nm_flag & NFSMNT_INT) != 0)
		return (EINTR);
	return (0);
}

#ifndef newnfs_realign
/*
 *	nfs_realign:
 *
 *	Check for badly aligned mbuf data and realign by copying the unaligned
 *	portion of the data into a new mbuf chain and freeing the portions
 *	of the old chain that were replaced.
 *
 *	We cannot simply realign the data within the existing mbuf chain
 *	because the underlying buffers may contain other rpc commands and
 *	we cannot afford to overwrite them.
 *
 *	We would prefer to avoid this situation entirely.  The situation does
 *	not occur with NFS/UDP and is supposed to only occasionally occur
 *	with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 */
APPLESTATIC void
newnfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;
	/*
	 * Scan for the first mbuf whose length or data pointer is not
	 * 4-byte aligned; pm is left pointing at the link to that mbuf.
	 */
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			/* Allocate the replacement mbuf (cluster if large). */
			/* NOTE(review): MGET result is not NULL-checked —
			 * confirm M_TRYWAIT cannot fail here. */
			MGET(n, M_TRYWAIT, MT_DATA);
			if (m->m_len >= MINCLSIZE) {
				MCLGET(n, M_TRYWAIT);
			}
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		++nfs_realign_count;
		/* Copy the misaligned tail of the chain into n, aligned. */
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		/* Free the old misaligned tail and splice in the copy. */
		mbuf_freem(*pm);
		*pm = n;
	}
}
#endif	/* newnfs_realign */

/*
 * These functions are mostly clones of the ones in bsd/nfs/nfs_socket.c.
 * There are two major differences in how requests are handled:
 * 1 - r_nmp == NULL is now a normal occurrence. (nfsv4 server callbacks or
 *  upcalls to userland daemons)
 * As such, I've changed the following:
 * - put a check before anything that uses r_nmp to see that it is non-NULL
 * - replaced the tests for r_nmp == NULL to indicate unmounting with tests
 *   for a flag called R_UNMOUNT
 * - added a check to newnfs_sigintr() for nmp == NULL, which just returns 0,
 *   in case it gets called when r_nmp == NULL.
 * 2 - the timer routine in my code sets R_SOFTTERM when an rpc times out
 *   or an interrupting signal is posted
 * As such, whenever newnfs_sigintr() is called, I also check for the R_SOFTTERM
 * flag set. Since I'm not sure if the testing for posted signals can be
 * done correctly from the timer for Darwin, I've left the newnfs_sigintr() calls
 * in the non-timer code. I figure they're harmless.
 * rick macklem, Apr. 27, 2006.
 */
/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
APPLESTATIC int
newnfs_connect(struct nfsmount *nmp, struct nfssockreq *nrp,
    struct nfsreq *rep, __unused struct ucred *cred, __unused NFSPROC_T *p)
{
	socket_t so;
	int error, rcvreserve, sndreserve, proto, on = 1;
	struct sockaddr *saddr;
	struct timeval timeo, tv;
	proc_t sp;

DEBUG1PRINTF("in newnfs_connect\n");
	nrp->nr_so = NULL;
	/* Create a socket with the family/type/protocol recorded in nrp. */
	saddr = (struct sockaddr *)mbuf_data(nrp->nr_nam);
	error = sock_socket(saddr->sa_family, nrp->nr_sotype, nrp->nr_soproto,
	    0, 0, (socket_t *)&nrp->nr_so);
DEBUG1PRINTF("aft sock=%d\n",error);
	if (error)
		goto bad;
	so = (socket_t)nrp->nr_so;

	/*
	 * Some servers require that the client port be a reserved port number.
	 * We always allocate a reserved port, as this prevents filehandle
	 * disclosure through UDP port capture.
	 * (I, personally, don't agree that reserved port #s improve security
	 *  for most cases, but others think it's a good thing and some servers
	 *  demand it, so...)
	 */
	if (saddr->sa_family == AF_INET && ((nrp->nr_lock & NFSR_RESERVEDPORT)||
	    (nmp && !(nmp->nm_flag & NFSMNT_NFSV4)))) {
		/*
		 * sobind() requires current_proc() to have superuser privs.
		 * If this bind is part of a reconnect, and the current proc
		 * doesn't have superuser privs, we hand the sobind() off to
		 * a kernel thread to process.
		 */
		if (nmp != NULL && (nmp->nm_state & NFSSTA_MOUNTED) &&
		    (sp = current_proc()) && suser(kauth_cred_get(), 0)) {
			/* request nfs_bind_resv_thread() to do bind */
			error = nfs_bind_resv_nopriv(nrp);
		} else {
DEBUG2PRINTF("at resv bind\n");
			error = nfs_bind_resv(nrp);
DEBUG2PRINTF("aft resv=%d\n",error);
		}
		if (error)
			goto bad;
	}

	/*
	 * Start a non-blocking connect, then poll every 5 seconds for
	 * completion, checking for an interrupting signal between polls.
	 */
	tv.tv_sec = 5;
	tv.tv_usec = 0;
	error = sock_connect(so, (struct sockaddr *)mbuf_data(nrp->nr_nam),
	    MSG_DONTWAIT);
DEBUG2PRINTF("aft soconn=%d\n",error);
	if (error && error != EINPROGRESS) {
		goto bad;
	}
	
	while ((error = sock_connectwait(so, &tv)) == EINPROGRESS) {
		if (rep != NULL &&
		    (error = newnfs_sigintr(nmp, rep, rep->r_procp))) {
			goto bad;
		}
	}
DEBUG2PRINTF("aft soconnwait\n");

	/*
	 * Always time out on receive, this allows us to reconnect the
	 * socket to deal with network changes.
	 */
	timeo.tv_usec = 0;
	timeo.tv_sec = 5;
	/* NOTE(review): the SO_RCVTIMEO result is deliberately not checked
	 * here -- a failure just means blocking receives. */
	error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo,
	    sizeof(timeo));
DEBUG2PRINTF("aft sockopt=%d\n",error);

	/*
	 * If nmp == NULL, this is server callback or upcall.
	 */
	if (nmp == NULL) {
		timeo.tv_sec = 1;
	} else if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) {
		timeo.tv_sec = 5;
	} else {
		/*
		 * For TCP sockets, set the timeout so it will do
		 * a reconnect. This will RARELY HAPPEN, since
		 * getting stuck in send is unlikely, but just in
		 * case...
		 */
		if (nrp->nr_sotype == SOCK_STREAM) {
			/* Use half the lease renew time when a clientid
			 * (nm_clp) is available, else 30 seconds. */
			if (nmp->nm_clp != NULL && nmp->nm_clp->nfsc_renew > 0)
				timeo.tv_sec = (nmp->nm_clp->nfsc_renew > 1) ?
				    (nmp->nm_clp->nfsc_renew / 2) : 1;
			else
				timeo.tv_sec = 30;
		} else {
			timeo.tv_sec = 0;
		}
	}
	error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo,
	    sizeof(timeo));
DEBUG2PRINTF("aft sockopt2=%d\n",error);

	/*
	 * Size the socket buffer reservations from the mount's rsize/wsize;
	 * callback/upcall sockets (nmp == NULL) get a small fixed size.
	 */
	if (nrp->nr_sotype == SOCK_DGRAM) {
		if (nmp != NULL) {
			sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3;
			rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) *
			   (nmp->nm_readahead > 0 ? nmp->nm_readahead + 1 : 2);
		} else {
			sndreserve = rcvreserve = 1024 * 3;
		}
	} else {
		sock_gettype(so, NULL, NULL, &proto);
		if (nrp->nr_sotype != SOCK_STREAM)
			panic("nfscon sotype");

		// Assume that SOCK_STREAM always requires a connection
		sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
DEBUG2PRINTF("aft keepalive\n");
		
		if (proto == IPPROTO_TCP) {
			sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on,
			    sizeof(on));
		}

		if (nmp != NULL) {
			/* The extra sizeof (u_long) allows for the RPC
			 * record mark on stream sockets. */
			sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
			    sizeof (u_long)) * 3;
			rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
			    sizeof (u_long)) *
			   (nmp->nm_readahead > 0 ? nmp->nm_readahead + 1 : 2);
		} else {
			sndreserve = rcvreserve = 1024 * 3;
		}
	}
	if (sndreserve > NFS_MAXSOCKBUF)
		sndreserve = NFS_MAXSOCKBUF;
	if (rcvreserve > NFS_MAXSOCKBUF)
		rcvreserve = NFS_MAXSOCKBUF;
	error = sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &sndreserve,
	    sizeof(sndreserve));
DEBUG2PRINTF("aft sockopt3=%d\n",error);
	if (error) {
		goto bad;
	}
	error = sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &rcvreserve,
	    sizeof(rcvreserve));
DEBUG2PRINTF("aft sockopt4=%d\n",error);
	if (error) {
		goto bad;
	}

	sock_nointerrupt(so, 1);

	/* Initialize other non-zero congestion variables */
	if (nmp != NULL) {
	    nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = nmp->nm_srtt[4] = (NFS_TIMEO << 3);
	    nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = nmp->nm_sdrtt[4] = 0;
	    nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	    nmp->nm_sent = 0;
	    nmp->nm_timeouts = 0;
	}
DEBUG2PRINTF("eo newnfs_conn ok\n");
	return (0);

bad:
	/* Close and clear the socket on any failure path. */
	newnfs_disconnect(nrp);
DEBUG1PRINTF("eo newnfs_conn err=%d\n",error);
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - newnfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the newnfs_sndlock() set on the mount point.
 */
APPLESTATIC int
nfs_reconnect(struct nfsreq *rep, struct ucred *cred)
{
	register struct nfsreq *rp;
	register struct nfssockreq *nrp;
	int error;

	nrp = rep->r_nrp;
	/* Get rid of the old, broken socket first. */
	newnfs_disconnect(nrp);
DEBUG1PRINTF("aft disconn\n");
	/*
	 * Wait at least one second before doing the connect. This seems
	 * to avoid the connect getting hung. Since lbolt wakes up once
	 * a second, tsleep on it twice.
	 */
	(void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
	(void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
	/* Keep trying to connect until success or a fatal error. */
	while ((error = newnfs_connect(rep->r_nmp,nrp,rep,cred,rep->r_procp))
		!= 0) {
DEBUG1PRINTF("reconn newnfs_conn err=%d\n",error);
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		if (error == EIO)
			return (EIO);
		if (rep->r_nmp != NULL) {
			/* Report the mount as timed out, once per request. */
			if (!(rep->r_flags & R_TPRINTFMSG))
				nfs_down(rep->r_nmp, rep->r_procp, error,
				    NFSSTA_TIMEO, "can not connect");
			lck_mtx_lock(nfs_req_slock);
			rep->r_flags |= R_TPRINTFMSG;
			lck_mtx_unlock(nfs_req_slock);
			if (!(rep->r_nmp->nm_state & NFSSTA_MOUNTED)) {
				/* we're not yet completely mounted and */
				/* we can't reconnect, so we fail */
				return (error);
			}
			/* Bail if a signal/termination is pending. */
			if ((error = newnfs_sigintr(rep->r_nmp, rep, rep->r_procp)))
				return (error);
		}
		(void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
	}
DEBUG1PRINTF("aft conn in reconn\n");

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	lck_mtx_lock(nfs_req_slock);
	TAILQ_FOREACH(rp, &nfsd_reqq, r_chain) {
		if (rp->r_nrp == nrp)
			rp->r_flags |= R_MUSTRESEND;
	}
	lck_mtx_unlock(nfs_req_slock);
	return (0);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
/*
 * Tear down the socket attached to this sockreq, if any.
 * The nr_so pointer is cleared before the shutdown/close calls.
 */
APPLESTATIC void
newnfs_disconnect(struct nfssockreq *nrp)
{
	socket_t sock;

DEBUG2PRINTF("in newnfs_disconn\n");
	if (nrp->nr_so == NULL)
		return;
	sock = (socket_t)nrp->nr_so;
	nrp->nr_so = NULL;
	sock_shutdown(sock, 2);
	sock_close(sock);
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (???)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (???)
 */
APPLESTATIC int
newnfs_send(socket_t so, mbuf_t nam, mbuf_t top, struct nfsreq *rep, __unused proc_t p)
{
	struct sockaddr *sendnam;
	int error = 0, error2, sotype, flags;
	u_long xidqueued = 0;
	struct nfsreq *rp;
	char savenametolog[MAXPATHLEN];
	struct msghdr msg;
	
DEBUG2PRINTF("in newnfs_send\n");
	if (rep) {
		/* Bail out early if the request has been interrupted or
		 * terminated; the caller's mbuf chain is consumed. */
		error = newnfs_sigintr(rep->r_nmp, rep, rep->r_procp);
DEBUG2PRINTF("aft sigint=%d\n",error);
		if (error) {
			mbuf_freem(top);
			return (error);
		}
		/*
		 * No socket means a reconnect is needed; mark the request
		 * for resend and return success so the caller retries later.
		 */
		if ((so = (socket_t)rep->r_nrp->nr_so) == NULL) {
			lck_mtx_lock(nfs_req_slock);
			rep->r_flags |= R_MUSTRESEND;
			lck_mtx_unlock(nfs_req_slock);
			mbuf_freem(top);
DEBUG2PRINTF("must resend\n");
			return (0);
		}
		lck_mtx_lock(nfs_req_slock);
		rep->r_flags &= ~R_MUSTRESEND;
		/* Remember the xid if the request is still queued, so the
		 * error path below can sanity-check that it didn't vanish. */
		TAILQ_FOREACH(rp, &nfsd_reqq, r_chain)
			if (rp == rep)
				break;
		if (rp != NULL)
			xidqueued = rp->r_xid;
		lck_mtx_unlock(nfs_req_slock);
	}
	sock_gettype(so, NULL, &sotype, NULL);
	/* Only unconnected datagram sockets need an explicit destination. */
	if ((sotype == SOCK_STREAM) || (sock_isconnected(so)) ||
	    (nam == NULL))
		sendnam = NULL;
	else
		sendnam = (struct sockaddr *)mbuf_data(nam);

	flags = 0;

	/* 
	 * Save the name here in case mount point goes away if we block.
	 * The name is using local stack and is large, but don't
	 * want to block if we malloc.
	 */
	if (rep != NULL && rep->r_nmp != NULL) {
		strncpy(savenametolog,
			vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname,
			MAXPATHLEN - 1);
		/*
		 * strncpy() does not NUL-terminate when the source is
		 * MAXPATHLEN - 1 bytes or longer; terminate explicitly so
		 * the log() below cannot read past the buffer.
		 */
		savenametolog[MAXPATHLEN - 1] = '\0';
	}
	bzero(&msg, sizeof(msg));
	msg.msg_name = (caddr_t)sendnam;
	msg.msg_namelen = sendnam == NULL ? 0 : sendnam->sa_len;
DEBUG2PRINTF("at sendmbuf\n");
	error = sock_sendmbuf(so, &msg, top, flags, NULL);
DEBUG2PRINTF("aft sendmbuf=%d\n",error);

	if (error) {
		if (rep) {
			if (xidqueued) {
				/* Make sure the request is still on the queue
				 * with the same xid; if not, something has
				 * gone badly wrong. */
				lck_mtx_lock(nfs_req_slock);
				TAILQ_FOREACH(rp, &nfsd_reqq, r_chain)
					if (rp == rep && rp->r_xid == xidqueued)
						break;
				if (!rp)
					panic("nfs_send: error %d xid %x gone",
					      error, xidqueued);
				lck_mtx_unlock(nfs_req_slock);
			}
			if (rep->r_nmp != NULL)
				log(LOG_INFO, "nfs send error %d for server %s\n",
				    error, savenametolog);
			/*
			 * Deal with errors for the client side.
			 */
			error2 = newnfs_sigintr(rep->r_nmp, rep, rep->r_procp);
DEBUG2PRINTF("aft sigint2=%d\n",error2);
			if (error2) {
				error = error2;
			} else {
				lck_mtx_lock(nfs_req_slock);
				rep->r_flags |= R_MUSTRESEND;
				lck_mtx_unlock(nfs_req_slock);
				/*
				 * Set the error to EPIPE to force a retry/reconnect.
				 */
				if (sotype == SOCK_STREAM) {
					error = EPIPE;
DEBUG1PRINTF("newnfs_send: set EPIPE\n");
				}
			}
		} else
			log(LOG_INFO, "nfsd send error %d\n", error);

		/*
		 * Handle any recoverable (soft) socket errors here. (???)
		 */
		if (error != EINTR && error != ERESTART && error != EIO &&
			error != EWOULDBLOCK && error != EPIPE) {
			error = 0;
		}
	}
DEBUG2PRINTF("eo newnfs_send=%d\n",error);
	return (error);
}

/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
static int
nfs_receive(struct nfsreq *rep, mbuf_t *mp, struct ucred *cred, proc_t p)
{
	socket_t so;
	struct iovec_32 aio;	/* iovec used to read the TCP record mark */
	mbuf_t m, mlast;
	u_long len, fraglen;
	int error, error2, sotype;
	struct msghdr msg;
	size_t rcvlen;
	int lastfragment;
	int32_t etime;		/* deadline (seconds) for EWOULDBLOCK retries */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = NULL;
	sotype = rep->r_nrp->nr_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = newnfs_sndlock(&rep->r_nrp->nr_lock, rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nr_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		error = newnfs_sigintr(rep->r_nmp, rep, p);
		if (error || rep->r_mrep != NULL) {
DEBUGSPRINTF("nfs_rcv sigintr err=%d\n",error);
			newnfs_sndunlock(&rep->r_nrp->nr_lock);
			/* EINTR here covers the case where the reply has
			 * already arrived (r_mrep != NULL). */
			if (error)
				return (error);
			return (EINTR);
		}
		so = (socket_t)rep->r_nrp->nr_so;
		if (!so) {
DEBUGSPRINTF("nfs_rcv at reconn\n");
			error = nfs_reconnect(rep, cred); 
DEBUGSPRINTF("nfs_rcv recon err=%d\n",error);
			if (error) {
				newnfs_sndunlock(&rep->r_nrp->nr_lock);
DEBUGSPRINTF("aft2 sndunlck\n");
				return (error);
			}
			goto tryagain;
		}
		/* Resend the request as long as the resend flag is set. */
		while (rep->r_flags & R_MUSTRESEND) {
DEBUGSPRINTF("nfs_rcv mustresend\n");
			if (rep->r_flags & R_GSS) {
			    /*
			     * For privacy, we must decrypt the data,
			     * so that it can be re-encrypted for the
			     * retry.
			     */
			    if ((rep->r_flags & R_PRIVACY) &&
				rep->r_procnum != NFSPROC_NULL)
				(void)nfsgss_des(rep->r_startm, rep->r_startpos,
				    rep->r_mrestlen + 3 * NFSX_UNSIGNED,
				    rep->r_key, 0);
			    mbuf_setnext(rep->r_mheadend, NULL);
			    mbuf_freem(rep->r_mreq);
			    if (mbuf_next(rep->r_mrestend)) {
				mbuf_freem(mbuf_next(rep->r_mrestend));
				mbuf_setnext(rep->r_mrestend, NULL);
			    }
			    /* Rebuild the RPC header for the retry. */
			    newnfs_rpchead(NULL, rep);
			    if (rep->r_flags & R_AUTHERR) {
				newnfs_sndunlock(&rep->r_nrp->nr_lock);
				return (EAUTH);
			    }
			    if (rep->r_flags & R_SOFTTERM) {
				newnfs_sndunlock(&rep->r_nrp->nr_lock);
				return (EINTR);
			    }
			}
			error = mbuf_copym(rep->r_mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m);
DEBUGSPRINTF("nfs_rcv aft mcopym\n");
			if (!error) {
				if (rep->r_nmp != NULL)
					OSAddAtomic(1, (SInt32*)&newnfsstats.rpcretries);
				error = newnfs_send(so, rep->r_nrp->nr_nam, m, rep, p);
			}
DEBUGSPRINTF("nfs_rcv resent err=%d\n",error);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep, cred))) {
					newnfs_sndunlock(&rep->r_nrp->nr_lock);
DEBUGSPRINTF("reconn2 err=%d\n",error);
					return (error);
				}
				goto tryagain;
			}
		}
		newnfs_sndunlock(&rep->r_nrp->nr_lock);
		if (sotype == SOCK_STREAM) {
			error = 0;
			len = 0;
			lastfragment = 0;
			mlast = NULL;
			/* Read RPC record fragments until the one flagged
			 * as the last fragment has been consumed. */
			while (!error && !lastfragment) {
				/* EWOULDBLOCK deadline: half the lease renew
				 * time if a clientid is known, else 60s. */
				if (rep->r_nmp && rep->r_nmp->nm_clp &&
				    rep->r_nmp->nm_clp->nfsc_renew > 0)
					etime = NFSD_MONOSEC + ((rep->r_nmp->nm_clp->nfsc_renew > 1) ?
					    (rep->r_nmp->nm_clp->nfsc_renew / 2) : 1);
				else
					etime = NFSD_MONOSEC + 60;
				do {
				   fraglen = 0;
				   aio.iov_base = (uintptr_t) &fraglen;
				   aio.iov_len = sizeof(u_long);
				   bzero(&msg, sizeof(msg));
				   msg.msg_iov = (struct iovec *) &aio;
				   msg.msg_iovlen = 1;
DEBUGSPRINTF("at sorcv1\n");
				   error = sock_receive(so, &msg, MSG_WAITALL, &rcvlen);
DEBUGSPRINTF("aft sorcv1=%d len=%d\n",error,rcvlen);
				   if (rep->r_flags & R_UNMOUNT) { /* if unmounted then bailout */
DEBUGSPRINTF("shutout1\n");
					goto shutout;
				   }
				   if (error == EWOULDBLOCK && rep) {
DEBUGSPRINTF("ewouldblock1\n");
					    error2 = newnfs_sigintr(rep->r_nmp,
						rep, p);
DEBUGSPRINTF("err2=%d\n",error2);
					    if (error2)
						error = error2;
				    }
				    if (error == EWOULDBLOCK && etime < NFSD_MONOSEC) {
DEBUGSPRINTF("nfs_receive set EPIPE\n");
					    error = EPIPE;
				    }
				} while (error == EWOULDBLOCK);
				if (!error && rcvlen < aio.iov_len) {
DEBUGSPRINTF("short rcv=%qd\n",rcvlen);
				    /* only log a message if we got a partial word */
				    if (rcvlen != 0 && rep->r_nmp != NULL)
					    log(LOG_INFO,
						 "short receive (%d/%d) from nfs server %s\n",
						 rcvlen, sizeof(u_long),
						 vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname);
				    error = EPIPE;
				}
				if (error)
					goto errout;
				/* The high bit of the record mark flags the
				 * last fragment; the rest is the length. */
				lastfragment = ntohl(fraglen) & 0x80000000;
				fraglen = ntohl(fraglen) & ~0x80000000;
				len += fraglen;
				/*
				 * This is SERIOUS! We are out of sync with the sender
				 * and forcing a disconnect/reconnect is all I can do.
				 */
				if (len > NFS_MAXPACKET) {
				    if (rep->r_nmp != NULL)
					log(LOG_ERR, "%s (%d) from nfs server %s\n",
					    "impossible RPC record length", len,
					    vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname);
				    error = EFBIG;
				    goto errout;
				}

				m = NULL;
				if (rep->r_nmp && rep->r_nmp->nm_clp &&
				    rep->r_nmp->nm_clp->nfsc_renew > 0)
					etime = NFSD_MONOSEC + ((rep->r_nmp->nm_clp->nfsc_renew > 1) ?
					    (rep->r_nmp->nm_clp->nfsc_renew / 2) : 1);
				else
					etime = NFSD_MONOSEC + 60;
				do {
				    rcvlen = fraglen;
DEBUGSPRINTF("at sorcv2 len=%d\n",fraglen);
				    error = sock_receivembuf(so, NULL, &m, MSG_WAITALL, &rcvlen);
DEBUGSPRINTF("aft sorcv2=%d\n",error);
				    if (rep->r_flags & R_UNMOUNT) /* if unmounted then bailout */ {
DEBUGSPRINTF("shutout2\n");
					goto shutout;
				    }
				    if (error == EWOULDBLOCK && etime < NFSD_MONOSEC) {
DEBUGSPRINTF("nfs_receive set EPIPE2\n");
					error = EPIPE;
				    }
				} while (error == EWOULDBLOCK || error == EINTR ||
					 error == ERESTART);

				if (!error && fraglen > rcvlen) {
				    if (rep->r_nmp != NULL)
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    rcvlen, fraglen,
					    vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname);
				    error = EPIPE;
				    mbuf_freem(m);
				}
				if (!error) {
					/* Append this fragment to the reply chain. */
					if (!*mp) {
						*mp = m;
						mlast = m;
					} else {
						error = mbuf_setnext(mlast, m);
						if (error) {
							printf("nfs_receive: mbuf_setnext failed %d\n", error);
							mbuf_freem(m);
						}
					}
					while (mbuf_next(mlast))
						mlast = mbuf_next(mlast);
				}
			}
		} else {
			/* Non-stream, non-datagram connected socket
			 * (presumably record-oriented -- MSG_EOR checked). */
			bzero(&msg, sizeof(msg));
			do {
			    rcvlen = 100000000;
			    error = sock_receivembuf(so, &msg, mp, 0, &rcvlen);
			    if (rep->r_flags & R_UNMOUNT) /* if unmounted then bailout */ {
				goto shutout;
 			    }   
			    if (error == EWOULDBLOCK && rep) {
				error2 = newnfs_sigintr(rep->r_nmp, rep, p);
				if (error2)
				    return (error2);
			    }
			} while (error == EWOULDBLOCK);

			if ((msg.msg_flags & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len = rcvlen;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			mbuf_freem(*mp);
			*mp = NULL;
			if (error != EPIPE && rep->r_nmp != NULL)
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n", error,
				    vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname);
			/* Try to reconnect and read the record again. */
			error = newnfs_sndlock(&rep->r_nrp->nr_lock, rep);
			if (!error) {
				error = nfs_reconnect(rep, cred);
				if (!error)
					goto tryagain;
				newnfs_sndunlock(&rep->r_nrp->nr_lock);
			}
		}
	} else {
		/*
		 * We could have failed while rebinding the datagram socket
		 * so we need to attempt to rebind here.
		 */
		if ((so = (socket_t)rep->r_nrp->nr_so) == NULL) {
			error = newnfs_sndlock(&rep->r_nrp->nr_lock, rep);
			if (!error) {
				error = nfs_reconnect(rep, cred);
				newnfs_sndunlock(&rep->r_nrp->nr_lock);
			}
			if (error)
				return (error);
			if (rep->r_flags & R_UNMOUNT) /* if unmounted then bailout */
				return (ENXIO);
			so = (socket_t)rep->r_nrp->nr_so;
		}
		bzero(&msg, sizeof(msg));
		len = 0;
		do {
			rcvlen = 1000000;
			error = sock_receivembuf(so, &msg, mp, 0, &rcvlen);
			if (rep->r_flags & R_UNMOUNT) /* if unmounted then bailout */
				goto shutout;
			if (error) {
				error2 = newnfs_sigintr(rep->r_nmp, rep, p);
				if (error2) {
					error = error2;
					goto shutout;
				}
			}
			/* Reconnect for all errors.  We may be receiving
			 * soft/hard/blocking errors because of a network
			 * change.
			 * XXX: we should rate limit or delay this
			 * to once every N attempts or something.
			 * although TCP doesn't seem to.
			 */
			if (error) {
DEBUGSPRINTF("at sndlock4\n");
				error2 = newnfs_sndlock(&rep->r_nrp->nr_lock, rep);
DEBUGSPRINTF("aft sndlck4 err=%d\n",error2);
				if (!error2) {
DEBUGSPRINTF("at reconn\n");
					error2 = nfs_reconnect(rep, cred);
DEBUGSPRINTF("aft reconn err=%d\n",error2);
					if (error2)
						error = error2;
					else if (rep->r_flags & R_UNMOUNT) /* if unmounted then bailout */
						error = ENXIO;
					else
						so = (socket_t)rep->r_nrp->nr_so;
DEBUGSPRINTF("at sndunlck5\n");
					newnfs_sndunlock(&rep->r_nrp->nr_lock);
DEBUGSPRINTF("aft sndunlck5\n");
				} else {
					error = error2;
				}
			}
		} while (error == EWOULDBLOCK);
	}
shutout:
	/* On any error, make sure no partial reply chain escapes. */
	if (error) {
		mbuf_freem(*mp);
		*mp = NULL;
DEBUGSPRINTF("nfs_rcv err=%d\n",error);
	}
	return (error);
}

/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
static int
nfs_reply(struct nfsreq *myrep, struct nfsrv_descript *nd,
    struct ucred *cred, proc_t p)
{
	register struct nfsreq *rep;
	register struct nfssockreq *nrp = myrep->r_nrp;
	register int32_t t1;
	struct nfsmount *nmp;
	u_int32_t rxid, *tl;
	int error, clearerror, optlen;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 */
		error = nfs_rcvlock(myrep);
		/* EALREADY means the reply arrived while waiting for the
		 * receive lock; hand it straight back. */
		if (error == EALREADY) {
			nd->nd_mrep = myrep->r_mrep;
			nd->nd_md = myrep->r_md;
			nd->nd_dpos = myrep->r_dpos;
			return (0);
		}
		if (error) {
DEBUGSPRINTF("repl1 reterr=%d\n",error);
			return (error);
		}
		/* Already received, bye bye */
		if (myrep->r_mrep != NULL) {
			nfs_rcvunlock(myrep);
			nd->nd_mrep = myrep->r_mrep;
			nd->nd_md = myrep->r_md;
			nd->nd_dpos = myrep->r_dpos;
			return (0);
		}
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = nfs_receive(myrep, &nd->nd_mrep, cred, p);
		if (myrep->r_flags & R_UNMOUNT) {
			nfs_rcvunlock(myrep);
			if (nd->nd_mrep != NULL)
				mbuf_freem(nd->nd_mrep);
DEBUGSPRINTF("ret1 enxio\n");
			return (ENXIO);
		}
		if (error) {
DEBUGSPRINTF("at rcvunlck err=%d\n",error);
			nfs_rcvunlock(myrep);
DEBUGSPRINTF("aft rcvunlck\n");
			if (myrep->r_flags & R_UNMOUNT) {
				if (nd->nd_mrep != NULL)
					mbuf_freem(nd->nd_mrep);
DEBUGSPRINTF("ret2 enxio\n");
				return (ENXIO);
			}
			/*
			 * Ignore routing errors on connectionless protocols?
			 */
			if (NFSIGNORE_SOERROR(nrp->nr_sotype, error)) {
DEBUGSPRINTF("ign soerr\n");
				/* Clear the pending socket error and retry. */
				if (nrp->nr_so != NULL) {
					optlen = sizeof (clearerror);
DEBUGSPRINTF("getsockopt\n");
					sock_getsockopt((socket_t)nrp->nr_so, SOL_SOCKET,
					    SO_ERROR, &clearerror, &optlen);
DEBUGSPRINTF("aft getsockopt\n");
				}
				if (myrep->r_flags & R_GETONEREP) {
DEBUGSPRINTF("getonerep ret 0\n");
					return (0);
				}
				continue;
			}
			if (nd->nd_mrep != NULL)
				mbuf_freem(nd->nd_mrep);
DEBUGSPRINTF("ret3 err=%d\n",error);
			return (error);
		}
		if (nd->nd_mrep == NULL) {
			nfs_rcvunlock(myrep);
DEBUGSPRINTF("rcv null rflg=0x%x\n",myrep->r_flags);
			if (myrep->r_flags & R_SOFTTERM) {
DEBUGSPRINTF("ret1 eintr\n");
				return (EINTR);
			}
DEBUGSPRINTF("ret4 enxio\n");
			return (ENXIO);
		}
	
		/*
		 * Get the xid and check that it is an rpc reply
		 */
		nd->nd_md = nd->nd_mrep;
		nd->nd_dpos = (caddr_t)mbuf_data(nd->nd_md);
		NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
		rxid = *tl++;
		if (*tl != newrpc_reply) {
DEBUGSPRINTF("not reply\n");
			OSAddAtomic(1, (SInt32 *)&newnfsstats.rpcinvalid);
			mbuf_freem(nd->nd_mrep);
/* NOTE(review): NFSM_DISSECT presumably jumps to nfsmout on a truncated
 * reply -- confirm against the macro's definition. */
nfsmout:
			nfs_rcvunlock(myrep);
			if (myrep->r_flags & R_GETONEREP)
				return (0);
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram
		 */
		lck_mtx_lock(nfs_req_slock);
		TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				/* Test hook: randomly drop matched replies. */
				if (nfstest_dropreplies) {
				    if ((arc4random() % nfstest_dropreplies) == 0) {
					rep = NULL;
					break;
				    }
				}
				/* Found it.. */
				rep->r_mrep = nd->nd_mrep;
				rep->r_md = nd->nd_md;
				rep->r_dpos = nd->nd_dpos;
				rep->r_flags &= ~R_SENT;
				if (rep->r_nmp != NULL) {
				    nmp = rep->r_nmp;
				    /* Optionally record round-trip stats. */
				    if (newnfsrtton) {
					struct rttl *rt;

					rt = &newnfsrtt.rttl[newnfsrtt.pos];
					rt->proc = rep->r_procnum;
					rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
					rt->sent = nmp->nm_sent;
					rt->cwnd = nmp->nm_cwnd;
					rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
					rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
					rt->fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
					NFSGETTIME(&rt->tstamp);
					if (rep->r_flags & R_TIMING)
						rt->rtt = rep->r_rtt;
					else
						rt->rtt = 1000000;
					newnfsrtt.pos = (newnfsrtt.pos + 1) % NFSRTTLOGSIZ;
				    }
				    /*
				     * Update congestion window.
				     * Do the additive increase of
				     * one rpc/rtt.
				     */
				    if (nmp->nm_cwnd <= nmp->nm_sent) {
					nmp->nm_cwnd +=
					   (NFS_CWNDSCALE * NFS_CWNDSCALE +
					   (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
					if (nmp->nm_cwnd > NFS_MAXCWND)
						nmp->nm_cwnd = NFS_MAXCWND;
				    }
				    nmp->nm_sent -= NFS_CWNDSCALE;
				    /*
				     * Update rtt using a gain of 0.125 on the mean
				     * and a gain of 0.25 on the deviation.
				     */
				    if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so course, it can often
					 * result in r_rtt == 0. Since
					 * r_rtt == N means that the actual
					 * rtt is between N+dt and N+2-dt ticks,
					 * add 1.
					 */
					t1 = rep->r_rtt + 1;
					t1 -= (NFS_SRTT(rep) >> 3);
					NFS_SRTT(rep) += t1;
					if (t1 < 0)
						t1 = -t1;
					t1 -= (NFS_SDRTT(rep) >> 2);
					NFS_SDRTT(rep) += t1;
				    }
				    nmp->nm_timeouts = 0;
				}
				break;
			}
		}
		lck_mtx_unlock(nfs_req_slock);
		nfs_rcvunlock(myrep);
		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == 0) {
			OSAddAtomic(1, (SInt32 *)&newnfsstats.rpcunexpected);
			mbuf_freem(nd->nd_mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
		if (myrep->r_flags & R_GETONEREP) {
DEBUGSPRINTF("getonerep2 ret 0\n");
			return (0);
		}
	}
}

/*
 * Acquire the receive lock for this request's socket.
 * Returns 0 with the lock held, EALREADY if the reply has already
 * arrived (no lock taken), or an error (ENXIO on unmount, or a
 * signal-related error from newnfs_sigintr).
 */
static int
nfs_rcvlock(struct nfsreq *rep)
{
	int *flagp = &rep->r_nrp->nr_lock;
	int error, slpflag, slptimeo = 0;
	struct timespec ts;

	lck_mtx_lock(nfs_sockl_mutex);
	/* make sure we still have our mountpoint */
	if (rep->r_flags & R_UNMOUNT) {
DEBUGSPRINTF("nfs_rcvlck umnt\n");
		/* A reply may have arrived before the unmount; report it. */
		if (rep->r_mrep != NULL) {
			lck_mtx_unlock(nfs_sockl_mutex);
			return (EALREADY);
		}
		lck_mtx_unlock(nfs_sockl_mutex);
		return (ENXIO);
	}

	/* Interruptible/soft mounts let the sleep below catch signals. */
	if (rep->r_nmp != NULL &&
	    (rep->r_nmp->nm_flag & (NFSMNT_INT | NFSMNT_SOFT)))
		slpflag = PCATCH;
	else
		slpflag = 0;
	while (*flagp & NFSR_RCVLOCK) {
		if ((error = newnfs_sigintr(rep->r_nmp, rep, rep->r_procp))) {
			lck_mtx_unlock(nfs_sockl_mutex);
DEBUGSPRINTF("nfs_rcvlck err=%d\n",error);
			return (error);
		} else if (rep->r_mrep != NULL) {
			lck_mtx_unlock(nfs_sockl_mutex);
			/*
			 * Don't bother sleeping if reply already arrived
			 */
			return (EALREADY);
		}
		*flagp |= NFSR_WANTRCV;
		/*
		 * We need to poll if we're P_NOREMOTEHANG so that we
		 * call newnfs_sigintr periodically above.
		 */
		if (rep->r_procp != NULL &&
		    (proc_noremotehang(rep->r_procp)) != 0)
			slptimeo = 1;
		ts.tv_sec = slptimeo;
		ts.tv_nsec = 0;
DEBUGSPRINTF("lckrcv tsl\n");
		msleep((caddr_t)flagp, nfs_sockl_mutex, slpflag | (PZERO + 1),
		    "nfsrcvlk", &ts);
DEBUGSPRINTF("aft lckrcv tsl\n");
		/* After one catchable sleep, fall back to 2-second polling. */
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2;
		}
		/*
		 * Make sure while we slept that the mountpoint didn't go away.
		 * newnfs_sigintr and caller nfs_reply expect it intact.
		 */
		if (rep->r_flags & R_UNMOUNT)  {
			lck_mtx_unlock(nfs_sockl_mutex);
DEBUGSPRINTF("nfs_rcvlck umnt2\n");
			return (ENXIO); /* don't have lock until out of loop */
		}
	}
	/*
	 * nfs_reply will handle it if reply already arrived.
	 * (We may have slept or been preempted).
	 */
	*flagp |= NFSR_RCVLOCK;
	lck_mtx_unlock(nfs_sockl_mutex);
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
static void
nfs_rcvunlock(struct nfsreq *rep)
{
	int *flagp;
	
	flagp = &rep->r_nrp->nr_lock;

	lck_mtx_lock(nfs_sockl_mutex);
	if ((*flagp & NFSR_RCVLOCK) == 0)
#ifdef notnow
		panic("nfs rcvunlock");
#else
		printf("panic rcvunlck\n");
#endif
	*flagp &= ~NFSR_RCVLOCK;
	if (*flagp & NFSR_WANTRCV) {
		*flagp &= ~NFSR_WANTRCV;
		wakeup((caddr_t)flagp);
	}
	lck_mtx_unlock(nfs_sockl_mutex);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
/*
 * Acquire the send lock guarded by *flagp.  Returns 0 with the lock
 * held; otherwise ENXIO (unmount) or EINTR when newnfs_sigintr reports
 * an error while waiting.  rep may be NULL (server/callback side).
 */
APPLESTATIC int
newnfs_sndlock(int *flagp, struct nfsreq *rep)
{
	NFSPROC_T *p;
	int slptimeo = 0, error;
	struct nfsmount *nmp;
	struct timespec ts;

	lck_mtx_lock(nfs_sockl_mutex);
	while (*flagp & NFSR_SNDLOCK) {
		if (rep) {
			p = rep->r_procp;
			nmp = rep->r_nmp;
			/* Poll every 2 seconds on interruptible/soft mounts
			 * so the sigintr check above runs periodically. */
			if (nmp && (nmp->nm_flag & (NFSMNT_INT | NFSMNT_SOFT)))
				slptimeo = 2;
		} else {
			p = NULL;
			nmp = NULL;
		}
		error = newnfs_sigintr(nmp, rep, p);
		if (error) {
			lck_mtx_unlock(nfs_sockl_mutex);
DEBUGSPRINTF("nfs_sndlck err=%d\n",error);
			/* ENXIO (unmount) passes through; everything else
			 * is reported as an interrupt. */
			if (error == ENXIO)
				return (error);
			return (EINTR);
		}
		*flagp |= NFSR_WANTSND;
		ts.tv_sec = slptimeo;
		ts.tv_nsec = 0;
DEBUGSPRINTF("sndlck tsl\n");
		(void) msleep((caddr_t)flagp, nfs_sockl_mutex, PZERO - 1,
		    "nfsndlck", &ts);
DEBUGSPRINTF("aft sndlck tsl\n");
	}
	*flagp |= NFSR_SNDLOCK;
	lck_mtx_unlock(nfs_sockl_mutex);
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
/*
 * Drop the send lock guarded by *flagp and wake up any waiters.
 */
APPLESTATIC void
newnfs_sndunlock(int *flagp)
{

	lck_mtx_lock(nfs_sockl_mutex);
	if (!(*flagp & NFSR_SNDLOCK)) {
#ifdef notnow
		panic("nfs sndunlock");
#else
		printf("LCKsnd unlock\n");
#endif
	}
	*flagp &= ~NFSR_SNDLOCK;
	if (*flagp & NFSR_WANTSND) {
		*flagp &= ~NFSR_WANTSND;
		wakeup((caddr_t)flagp);
	}
	lck_mtx_unlock(nfs_sockl_mutex);
}

/*
 * NFS server system calls
 */

/*
 * Nfs server pseudo system call for the nfsd's
 * Based on the flag value it either:
 * - adds a socket to the selection list
 * - remains in the kernel as an nfsd
 * - remains in the kernel as an nfsiod
 * For INET6 we suppose that nfsd provides only IN6P_IPV6_V6ONLY sockets
 * and that mountd provides
 *  - sockaddr with no IPv4-mapped addresses
 *  - mask for both INET and INET6 families if there is IPv4-mapped overlap
 */
int
#ifdef APPLEKEXT
newnfssvc(struct proc *p, struct nfssvc_args *uap, __unused int *retval)
#else
nfssvc(struct proc *p, struct nfssvc_args *uap, __unused int *retval)
#endif
{
	mbuf_t nam;
#ifdef NFSD
	struct user_nfsd_args user_nfsdarg;
#endif
	struct user_nfscbd_args user_nfscbdarg;
	socket_t so;
	int error;
	static int nfsrvdinit = 1;	/* one-shot subsystem init guard */

#ifndef APPLEKEXT
	AUDIT_ARG(cmd, uap->flag);
#endif

	/*
	 * Must be super user
	 */
	error = proc_suser(p);
	if (error)
		return (error);
	/* First call ever: initialize client/server data structures. */
	if (nfsrvdinit) {
		nfsrvdinit = 0;
		newnfs_portinit();
#ifdef NFSCL
		nfscl_init();
#endif
#ifdef NFSD
		nfsvno_init();
		nfsrvd_init(0, p);	/* Init server data structures */
#endif
#ifdef NFSCL
		nfsrvd_cbinit(0, p);	/* Init cb server data structures */
#endif
	}
	/* Wait for any in-progress (re)initialization to finish. */
	NFSD_LOCK();
#ifdef NFSD
	while (newnfs_sockhead_flag & SLP_INIT) {
		newnfs_sockhead_flag |= SLP_WANTINIT;
		(void) nfsmsleep((caddr_t)&newnfs_sockhead, NFSDLOCKMUTEXPTR,
		    PSOCK, "nfsd init", NULL);
	}
#endif
#ifdef NFSCL
	while (newnfscb_sockhead_flag & SLP_INIT) {
		newnfscb_sockhead_flag |= SLP_WANTINIT;
		(void) nfsmsleep((caddr_t)&newnfscb_sockhead, NFSDLOCKMUTEXPTR,
		    PSOCK, "nfscbd init", NULL);
	}
#endif
	NFSD_UNLOCK();
	if (uap->flag & NFSSVC_ADDSOCK) {
#ifndef NFSD
		error = ENOSYS;
#else
		/* Copy in the nfsd socket arguments (LP64-aware). */
		if (IS_64BIT_PROCESS(p)) {
			error = copyin(uap->argp, (caddr_t)&user_nfsdarg, sizeof(user_nfsdarg));
		} else {
			struct nfsd_args    tmp_args;
			error = copyin(uap->argp, (caddr_t)&tmp_args, sizeof(tmp_args));
			if (error == 0) {
				user_nfsdarg.sock = tmp_args.sock;
				user_nfsdarg.name = CAST_USER_ADDR_T(tmp_args.name);
				user_nfsdarg.namelen = tmp_args.namelen;
			}
		}
		if (error)
			return (error);
		/* get the socket */
		error = file_socket(user_nfsdarg.sock, &so);
		if (error)
			return (error);
		/* Get the client address for connected sockets. */
		if (user_nfsdarg.name == USER_ADDR_NULL || user_nfsdarg.namelen == 0) {
			nam = NULL;
		} else {
			error = sockargs(&nam, user_nfsdarg.name, user_nfsdarg.namelen, MBUF_TYPE_SONAME);
			if (error) {
				/* drop the iocount file_socket() grabbed on the file descriptor */
				file_drop(user_nfsdarg.sock);
				return (error);
			}
		}
		/*
		 * newnfs_addsock() will grab a retain count on the socket
		 * to keep the socket from being closed when nfsd closes its
		 * file descriptor for it.
		 */
		error = newnfs_addsock(so, nam);
		/* drop the iocount file_socket() grabbed on the file descriptor */
		file_drop(user_nfsdarg.sock);
#endif	/* !NFSD */
	} else if (uap->flag & NFSSVC_CBADDSOCK) {
#ifndef NFSCL
		error = ENOSYS;
#else
		/* Same as ADDSOCK above, but for the callback daemon. */
		if (IS_64BIT_PROCESS(p)) {
			error = copyin(uap->argp, (caddr_t)&user_nfscbdarg, sizeof(user_nfscbdarg));
		} else {
			struct nfscbd_args    tmp_args;
			error = copyin(uap->argp, (caddr_t)&tmp_args, sizeof(tmp_args));
			if (error == 0) {
				user_nfscbdarg.sock = tmp_args.sock;
				user_nfscbdarg.name = CAST_USER_ADDR_T(tmp_args.name);
				user_nfscbdarg.namelen = tmp_args.namelen;
				user_nfscbdarg.port = tmp_args.port;
			}
		}
		if (error)
			return (error);
		/* get the socket */
		error = file_socket(user_nfscbdarg.sock, &so);
		if (error)
			return (error);
		/* Get the client address for connected sockets. */
		if (user_nfscbdarg.name == USER_ADDR_NULL || user_nfscbdarg.namelen == 0) {
			nam = NULL;
		} else {
			error = sockargs((struct mbuf **)&nam, user_nfscbdarg.name, user_nfscbdarg.namelen, MBUF_TYPE_SONAME);
			if (error) {
				/* drop the iocount file_socket() grabbed on the file descriptor */
				file_drop(user_nfscbdarg.sock);
				return (error);
			}
		}
		/*
		 * newnfs_cbaddsock() will grab a retain count on the socket
		 * to keep the socket from being closed when nfsd closes its
		 * file descriptor for it.
		 */
		error = newnfs_cbaddsock(so, nam);
		/* Record the callback port and enable callbacks once. */
		if (!error && nfscl_enablecallb == 0) {
			nfsv4_cbport = user_nfscbdarg.port;
			nfscl_enablecallb = 1;
		}
		/* drop the iocount file_socket() grabbed on the file descriptor */
		file_drop(user_nfscbdarg.sock);
#endif	/* !NFSCL */
	} else if (uap->flag & NFSSVC_MNTD) {
		error = ENOSYS;
	} else if (uap->flag & NFSSVC_NFSD) {
		/* Caller becomes an nfsd thread; only returns on exit. */
#ifndef	NFSD
		error = ENOSYS;
#else
		error = nfsrvd_nfsd(p);
#endif /* !NFSD */
	} else if (uap->flag & NFSSVC_NFSCBD) {
		/* Caller becomes an nfscbd (callback) thread. */
#ifndef	NFSCL
		error = ENOSYS;
#else
		error = nfsrvd_nfscbd(p);
#endif /* NFSCL */
	} else if (uap->flag & NFSSVC_RENEWTHREAD) {
#if !defined(APPLEKEXT) || APPLEKEXT < 1050
		error = ENOSYS;
#else
		error = nfscl_renewproc(p, uap);
#endif /* !APPLEKEXT || APPLEKEXT < 1050 */
#if defined(APPLEKEXT) && APPLEKEXT >= 1050
	} else if (uap->flag & NFSSVC_BIOD) {
		error = nfscl_asyncioproc(p, uap);
#endif /* APPLEKEXT >= 1050 */
	} else {
		error = nfssvc_call(p, uap, kauth_cred_get());
	}
	/* A signalled daemon exiting is a normal shutdown, not an error. */
	if (error == EINTR || error == ERESTART)
		error = 0;
	return (error);
}

/*
 * Shut down a socket associated with an nfssvc_sock structure.
 * Should be called with the send lock set, if required.
 * The trick here is to increment the sref at the start, so that the nfsds
 * will stop using it and clear ns_flag at the end so that it will not be
 * reassigned during cleanup.
 */
APPLESTATIC void
nfsrvd_zapsock(struct nfssvc_sock *slp)
{
	socket_t so;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	/* Clearing all flags makes the slp invalid to the nfsds. */
	slp->ns_flag &= ~SLP_ALLFLAGS;

	so = slp->ns_so;
	if (so == NULL)
		return;

	/*
	 * Attempt to deter future upcalls, but leave the
	 * upcall info in place to avoid a race with the
	 * networking code.
	 */
	socket_lock((struct socket *)so, 1);
	((struct socket *)so)->so_rcv.sb_flags &= ~SB_UPCALL;
	socket_unlock((struct socket *)so, 1);

	sock_shutdown(so, SHUT_RDWR);
}

/*
 * Print a console/tty message about an NFS server, optionally with an
 * error number appended.  Always returns 0.
 */
static int
nfsrv_msg(proc_t p,
	char *server,
	const char *msg,
	int error)
{
	tpr_t tpr = p ? tprintf_open(p) : NULL;

	if (error != 0) {
		tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg,
		    error);
	} else {
		tprintf(tpr, "nfs server %s: %s\n", server, msg);
	}
	tprintf_close(tpr);
	return (0);
}


/*
 * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
 * until it is killed by a signal.
 */
#ifdef NFSD
APPLESTATIC int
nfsrvd_nfsd(NFSPROC_T *p)
{
	mbuf_t m;
	int siz;
	struct nfssvc_sock *slp;
	struct nfsd *nfsd;
	struct nfsrv_descript *nd;
	struct nfsrvcache *rp;
	int error = 0, cacherep = RC_DOIT;
	boolean_t funnel_state;

	/* Allocate per-thread nfsd state and an RPC descriptor. */
	MALLOC(nfsd, struct nfsd *, sizeof (struct nfsd), M_NFSD, M_WAITOK);
	NFSBZERO((caddr_t)nfsd, sizeof (struct nfsd));
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
	    M_NFSNFSDDESC, M_WAITOK);
	nd->nd_cred = newnfs_getcred();
	NFSD_LOCK();
	TAILQ_INSERT_TAIL(&newnfsd_head, nfsd, nfsd_chain);
	newnfs_numnfsd++;
	NFSD_UNLOCK();

	/* NOTE(review): funnel_state is saved but never restored on return
	 * — confirm the caller does not rely on the funnel being re-taken. */
	funnel_state = thread_funnel_set(kernel_flock, FALSE);

	/*
	 * Loop getting rpc requests until SIGKILL.
	 */
	for (;;) {
		if ((nfsd->nfsd_flag & NFSD_REQINPROG) == 0) {
			/* Sleep until a socket needs servicing. */
			NFSD_LOCK();
			while (nfsd->nfsd_slp == NULL &&
			    (newnfsd_head_flag & NFSD_CHECKSLP) == 0) {
				nfsd->nfsd_flag |= NFSD_WAITING;
				nfsd_waiting++;
				error = nfsmsleep((caddr_t)nfsd,
				    NFSDLOCKMUTEXPTR, PSOCK | PCATCH,
				    "nfsd", 0);
				nfsd_waiting--;
				if (error)
					goto done;
			}
			/* Scan the socket list for one with pending work. */
			if (nfsd->nfsd_slp == NULL &&
			    (newnfsd_head_flag & NFSD_CHECKSLP) != 0) {
				TAILQ_FOREACH(slp, &newnfs_sockhead, ns_chain) {
				    lck_rw_lock_shared(&slp->ns_rwlock);
				    if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
					== (SLP_VALID | SLP_DOREC)) {
					    if (lck_rw_lock_shared_to_exclusive(&slp->ns_rwlock)) {
						/* upgrade failed and we lost the lock; take exclusive and recheck */
						lck_rw_lock_exclusive(&slp->ns_rwlock);
						if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
						    != (SLP_VALID | SLP_DOREC)) {
						    /* flags no longer set, so skip this socket */
						    lck_rw_done(&slp->ns_rwlock);
						    continue;
						}
					    }
					    /* Claim this socket for this nfsd. */
					    slp->ns_flag &= ~SLP_DOREC;
					    slp->ns_sref++;
					    nfsd->nfsd_slp = slp;
					    lck_rw_done(&slp->ns_rwlock);
					    break;
				    }
				    lck_rw_done(&slp->ns_rwlock);
				}
				if (slp == NULL)
					newnfsd_head_flag &= ~NFSD_CHECKSLP;
			}
			NFSD_UNLOCK();
			if ((slp = nfsd->nfsd_slp) == NULL)
				continue;
			/* Pull data off the socket and parse one request. */
			lck_rw_lock_exclusive(&slp->ns_rwlock);
			if (slp->ns_flag & SLP_VALID) {
				if ((slp->ns_flag & (SLP_NEEDQ|SLP_DISCONN)) == SLP_NEEDQ) {
					slp->ns_flag &= ~SLP_NEEDQ;
					nfsrvd_rcv_locked(slp->ns_so, slp, MBUF_WAITOK);
				}
				if (slp->ns_flag & SLP_DISCONN)
					nfsrvd_zapsock(slp);
				error = nfsrvd_dorec(slp, nfsd, nd, p, 0);
				nfsd->nfsd_flag |= NFSD_REQINPROG;
			}
			lck_rw_done(&slp->ns_rwlock);
		} else {
			error = 0;
			slp = nfsd->nfsd_slp;
		}
		if (error || (slp->ns_flag & SLP_VALID) == 0) {
			nfsd->nfsd_slp = NULL;
			nfsd->nfsd_flag &= ~NFSD_REQINPROG;
			nfsrv_slpderef(slp, 0);
			continue;
		}
		if (nd) {
		    if (slp->ns_sotype == SOCK_STREAM)
			nd->nd_flag |= ND_STREAMSOCK;
		    NFSGETTIME(&nd->nd_starttime);
		    if (nd->nd_nam2)
			nd->nd_nam = nd->nd_nam2;
		    else
			nd->nd_nam = slp->ns_nam;

		    /*
		     * Several cases, as set up by nfsrvd_dorec().
		     * 1 - nd_repstat == NFSERR_DONTREPLY, which means
		     *     just silently drop the request
		     * 2 - nd_repstat set to some other error, which
		     *     means that an RPC layer error has occurred,
		     *     so generate the reply now
		     * 3 - nd_mreq not NULL, which means the reply has
		     *     already been created, via nfsgss_continit()
		     * 4 - nd_procnum == NFSPROC_NULL with nd_mreq == NULL,
		     *     is a normal null rpc, so just create reply
		     * 5 - nd_repstat == 0 && nd_mreq == NULL, which
		     *     means a normal nfs rpc, so check the cache
		     */
		    if (nd->nd_repstat == NFSERR_DONTREPLY)
			cacherep = RC_DROPIT;
		    else if (nd->nd_repstat || nd->nd_mreq)
			cacherep = RC_REPLY;
		    else if (nd->nd_procnum == NFSPROC_NULL) {
			nd->nd_repstat = NFSERR_RETVOID;
			NFSINCRGLOBAL(newnfsstats.srvrpccnt[nd->nd_procnum]);
			cacherep = RC_REPLY;
		    } else
			cacherep = nfsrvd_getcache(nd);
		}

		/*
		 * Handle the request. There are three cases.
		 * RC_DOIT - do the RPC
		 * RC_REPLY - return the reply generated by nfsrvd_getcache()
		 * RC_DROPIT - just throw the request away
		 */
		rp = NULL;
		if (cacherep == RC_DOIT) {
			/* Perform the RPC under the shared export lock. */
			lck_rw_lock_shared(&newnfs_export_rwlock);
			nfsrvd_dorpc(nd, slp, p);
			lck_rw_done(&newnfs_export_rwlock);
			if (nd->nd_repstat == NFSERR_DONTREPLY) {
				if (nd->nd_mreq)
					mbuf_freem(nd->nd_mreq);
				cacherep = RC_DROPIT;
			}
			rp = nfsrvd_updatecache(nd);
		}
		switch (cacherep) {
		    case RC_DOIT:
		    case RC_REPLY:
			/* Build the RPC reply header and size the reply. */
			nfsrv_replyheader(nd, slp, p);
			m = nd->nd_mreq;
			siz = 0;
			while (m) {
				siz += mbuf_len(m);
				m = mbuf_next(m);
			}
DEBUG2PRINTF("reply len=%d\n",siz);
			if (siz <= 0) {
				printf("mbuf siz=%d\n",siz);
				panic("Bad nfsd reply size");
			} else if (siz > NFS_MAXPACKET) {
				printf("nfs reply siz=%d TOO BIG, ", siz);
				printf("increase NFS_MAXPACKET\n");
			}
			m = nd->nd_mreq;
			mbuf_pkthdr_setlen(m, siz);
			error = mbuf_pkthdr_setrcvif(m, NULL);
			if (error)
				panic("nfsd setrcvif failed: %d", error);
			/*
			 * For stream protocols, prepend a Sun RPC
			 * Record Mark.
			 */
			if (slp->ns_sotype == SOCK_STREAM) {
				error = mbuf_prepend(&m, NFSX_UNSIGNED, MBUF_WAITOK);
				if (!error)
					*(u_long*)mbuf_data(m) = htonl(0x80000000 | siz);
			}
			if (!error) {
				if (slp->ns_flag & SLP_VALID)
				    error = newnfs_send(slp->ns_so, nd->nd_nam2, m, NULL, p);
				else
				    error = EPIPE;
			}
			if (error)
				mbuf_freem(m);
			if (rp)
				nfsrvd_sentcache(rp, slp->ns_so, error);
			if (newnfsrtton)
				nfsrvd_rt(slp->ns_sotype, nd, cacherep);
			if (nd->nd_nam2)
				mbuf_freem(nd->nd_nam2);
			if (nd->nd_mrep)
				mbuf_freem(nd->nd_mrep);
			/* A broken pipe means the connection is dead. */
			if (error == EPIPE) {
				lck_rw_lock_exclusive(&slp->ns_rwlock);
				nfsrvd_zapsock(slp);
				lck_rw_done(&slp->ns_rwlock);
			}
			if (error == EINTR || error == ERESTART) {
				nfsrv_slpderef(slp, 0);
				NFSD_LOCK();
				goto done;
			}
			break;
		    case RC_DROPIT:
			if (newnfsrtton)
				nfsrvd_rt(slp->ns_sotype, nd, cacherep);
			mbuf_freem(nd->nd_mrep);
			if (nd->nd_nam2)
				NFSSOCKADDRFREE(nd->nd_nam2);
			break;
		};

		/* See if another request is queued on this socket. */
		lck_rw_lock_exclusive(&slp->ns_rwlock);
		if (nfsrvd_dorec(slp, nfsd, nd, p, 0)) {
			lck_rw_done(&slp->ns_rwlock);
			nfsd->nfsd_flag &= ~NFSD_REQINPROG;
			nfsd->nfsd_slp = NULL;
			nfsrv_slpderef(slp, 0);
		} else {
			lck_rw_done(&slp->ns_rwlock);
		}
	}
done:
	/* Reached with the NFSD lock held on both goto paths. */
	TAILQ_REMOVE(&newnfsd_head, nfsd, nfsd_chain);
	if (--newnfs_numnfsd == 0) {
		NFSD_UNLOCK();
		nfsrvd_init(TRUE, p);	/* Reinitialize everything */
	} else {
		NFSD_UNLOCK();
	}
	NFSFREECRED(nd->nd_cred);
	FREE((caddr_t)nd, M_NFSNFSDDESC);
	FREE((caddr_t)nfsd, M_NFSD);
	return (error);
}
#endif	/* NFSD */

/*
 * Look up a file name. Basically just initialize stuff and call namei().
 */
/*
 * Look up a file name.  Sets up a vfs_context for the current thread's
 * credentials, then runs namei() on the user-space path.
 */
APPLESTATIC int
nfsrv_lookupfilename(struct nameidata *ndp, char *fname, NFSPROC_T *p)
{
	struct vfs_context ctx;
	int error;

	ctx.vc_thread = current_thread();
	ctx.vc_ucred = kauth_cred_get();
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE,
	    CAST_USER_ADDR_T(fname), &ctx);
	error = namei(ndp);
	if (error == 0)
		nameidone(ndp);
	return (error);
}

/*
 * Funnel wrapper for newnfs_timer(): take the kernel funnel, run the
 * timer routine, then drop the funnel again.  (Converted from the
 * obsolete K&R identifier-list definition to an ANSI prototype.)
 */
void
newnfs_timer_funnel(void *arg)
{
	(void) thread_funnel_set(kernel_flock, TRUE);
	newnfs_timer(arg);
	(void) thread_funnel_set(kernel_flock, FALSE);
}

/*
 * Return the current system uptime, in seconds.
 */
APPLESTATIC int32_t
nfsrv_retuptime(void)
{
	struct timeval tv;

	microuptime(&tv);
	return (tv.tv_sec);
}

#ifdef notyet
/*
 * For darwin, just fill the sockaddr into an mbuf and then do the
 * vfs call.
 */
APPLESTATIC int
nfsdarwin_fhtovp(struct mount *mp, struct fid *fidp, struct sockaddr *saddr,
    struct vnode **vpp, int *exflagsp, struct ucred **credanonp)
{
	register struct mbuf *m;
	int error;

	/* Wrap the sockaddr in an mbuf, as VFS_FHTOVP() expects. */
	if (saddr) {
		MGET(m, M_WAIT, MT_SONAME);
		m->m_len = saddr->sa_len;
		bcopy((caddr_t)saddr, mtod(m, caddr_t), saddr->sa_len);
	} else {
		m = NULL;
	}
	error = VFS_FHTOVP(mp, fidp, m, vpp, exflagsp, credanonp);
	if (m)
		m_free(m);
	return (error);
}

/*
 * For darwin, just fill the sockaddr into an mbuf and then do the
 * vfs call.
 */
APPLESTATIC struct netcred *
nfsdarwin_export_lookup(struct mount *mp, struct netexport *nep,
    struct sockaddr *saddr)
{
	register struct mbuf *m;
	struct netcred *ret;

	/* Wrap the sockaddr in an mbuf, as vfs_export_lookup() expects. */
	if (saddr) {
		MGET(m, M_WAIT, MT_SONAME);
		m->m_len = saddr->sa_len;
		bcopy((caddr_t)saddr, mtod(m, caddr_t), saddr->sa_len);
	} else {
		m = NULL;
	}
	ret = vfs_export_lookup(mp, nep, m);
	if (m)
		m_free(m);
	return (ret);
}

/*
 * XXX assumes a ufs file system type!!
 */
APPLESTATIC int
nfsdarwin_check_export(struct mount *mp, struct sockaddr *saddr,
    int *exflagsp, struct ucred **credanonp)
{
	register struct netcred *np;
	register struct ufsmount *ump = VFSTOUFS(mp);

	/*
	 * Get the export permission structure for this <mp, client> tuple.
	 */
	np = nfsdarwin_export_lookup(mp, &ump->um_export, saddr);
	if (np == NULL)
		return (EACCES);

	/* Hand back the export flags and the anonymous credential. */
	*exflagsp = np->netc_exflags;
	*credanonp = &np->netc_anon;
	return (0);
}
#endif

/*
 * Adds a socket to the list for servicing by nfsds.
 */
#ifdef NFSD
static int
newnfs_addsock(socket_t so, mbuf_t mynam)
{
	int siz;
	struct nfssvc_sock *slp;
	struct nfssvc_sock *tslp = NULL;
	int error, sodomain, sotype, soprotocol, on = 1;
	struct timeval timeo;
	static u_int64_t sockref = 0;	/* monotonically increasing socket id */

	/* make sure mbuf constants are set up */
	if (!ncl_mbuf_mlen)
		newncl_mbuf_init();

	sock_gettype(so, &sodomain, &sotype, &soprotocol);

	/*
	 * Add it to the list, as required.
	 */
	if (soprotocol == IPPROTO_UDP) {
		/* Only one UDP socket is supported; reuse the static slp. */
		tslp = newnfs_udpsock;
		if (!tslp || (tslp->ns_flag & SLP_VALID)) {
			mbuf_freem(mynam);
			return (EPERM);
		}
		/* NOTE(review): ns_rwlock is re-initialized every time the
		 * UDP socket is (re)added — confirm this is safe. */
		lck_rw_init(&tslp->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
	}
	/* reserve buffer space for 2 maximally-sized packets */
	siz = NFS_MAXPACKET;
	if (sotype == SOCK_STREAM)
		siz += sizeof (u_long);	/* room for the RPC record mark */
	siz *= 2;
	if (siz > NFS_MAXSOCKBUF)
		siz = NFS_MAXSOCKBUF;
	if ((error = sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &siz, sizeof(siz))) ||
	    (error = sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &siz, sizeof(siz)))) {
		mbuf_freem(mynam);
		return (error);
	}

	/*
	 * Set protocol specific options { for now TCP only } and
	 * reserve some space. For datagram sockets, this can get called
	 * repeatedly for the same socket, but that isn't harmful.
	 */
	if (sotype == SOCK_STREAM) {
		sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	}
	if (sodomain == AF_INET && soprotocol == IPPROTO_TCP) {
		sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	}

	sock_nointerrupt(so, 0);

	/* Zero timeouts: socket I/O never blocks with a timeout here.
	 * NOTE(review): errors from these two setsockopt calls are
	 * assigned to 'error' but never checked — confirm intentional. */
	timeo.tv_usec = 0;
	timeo.tv_sec = 0;
	error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));

	if (tslp) {
		slp = tslp;
		lck_mtx_lock(newnfsd_mtx);
	} else {
		/* TCP: allocate a fresh slp and queue it on the list. */
		MALLOC(slp, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
				M_NFSSVC, M_WAITOK);
		if (!slp) {
			mbuf_freem(mynam);
			return (ENOMEM);
		}
		bzero((caddr_t)slp, sizeof (struct nfssvc_sock));
		lck_rw_init(&slp->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
		slp->ns_sockref = ++sockref;
		slp->ns_tcpconntime = NFSD_MONOSEC;
		lck_mtx_lock(newnfsd_mtx);
		TAILQ_INSERT_TAIL(&newnfs_sockhead, slp, ns_chain);
	}

	sock_retain(so); /* grab a retain count on the socket */
	slp->ns_so = so;
	slp->ns_sotype = sotype;
	slp->ns_nam = mynam;

	/* Install the receive upcall so incoming data wakes the nfsds. */
	socket_lock(so, 1);
	so->so_upcallarg = (caddr_t)slp;
	so->so_upcall = nfsrvd_rcv;
	so->so_rcv.sb_flags |= SB_UPCALL; /* required for freebsd merge */
	socket_unlock(so, 1);

	slp->ns_flag = SLP_VALID | SLP_NEEDQ;

	nfsrvd_wakenfsd(slp);
	lck_mtx_unlock(newnfsd_mtx);

	return (0);
}
#endif	/* NFSD */

#ifdef NFSCL
/*
 * Adds a socket to the list for servicing by nfscbds.
 */
static int
newnfs_cbaddsock(socket_t so, mbuf_t mynam)
{
	int siz;
	struct nfssvc_sock *slp;
	struct nfssvc_sock *tslp = NULL;
	int error, sodomain, sotype, soprotocol, on = 1;
	struct timeval timeo;
	static u_int64_t sockref = 0;	/* monotonically increasing socket id */

	/* make sure mbuf constants are set up */
	if (!ncl_mbuf_mlen)
		newncl_mbuf_init();

	sock_gettype(so, &sodomain, &sotype, &soprotocol);

	/*
	 * Add it to the list, as required.
	 */
	if (soprotocol == IPPROTO_UDP) {
		/* Only one UDP callback socket is supported. */
		tslp = newnfs_cbudpsock;
		if (!tslp || (tslp->ns_flag & SLP_VALID)) {
			mbuf_freem(mynam);
			return (EPERM);
		}
		/* NOTE(review): ns_rwlock is re-initialized every time the
		 * UDP socket is (re)added — confirm this is safe. */
		lck_rw_init(&tslp->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
	}
	/* reserve buffer space for 2 maximally-sized packets */
	siz = NFS_MAXPACKET;
	if (sotype == SOCK_STREAM)
		siz += sizeof (u_long);	/* room for the RPC record mark */
	siz *= 2;
	if (siz > NFS_MAXSOCKBUF)
		siz = NFS_MAXSOCKBUF;
	if ((error = sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &siz, sizeof(siz))) ||
	    (error = sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &siz, sizeof(siz)))) {
		mbuf_freem(mynam);
		return (error);
	}

	/*
	 * Set protocol specific options { for now TCP only } and
	 * reserve some space. For datagram sockets, this can get called
	 * repeatedly for the same socket, but that isn't harmful.
	 */
	if (sotype == SOCK_STREAM) {
		sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	}
	if (sodomain == AF_INET && soprotocol == IPPROTO_TCP) {
		sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	}

	sock_nointerrupt(so, 0);

	/* Zero timeouts; NOTE(review): results of these two setsockopt
	 * calls are assigned but never checked — confirm intentional. */
	timeo.tv_usec = 0;
	timeo.tv_sec = 0;
	error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));

	if (tslp) {
		slp = tslp;
		lck_mtx_lock(newnfsd_mtx);
	} else {
		/* TCP: allocate a fresh slp and queue it on the cb list. */
		MALLOC(slp, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
				M_NFSSVC, M_WAITOK);
		if (!slp) {
			mbuf_freem(mynam);
			return (ENOMEM);
		}
		bzero((caddr_t)slp, sizeof (struct nfssvc_sock));
		lck_rw_init(&slp->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
		slp->ns_sockref = ++sockref;
		slp->ns_tcpconntime = NFSD_MONOSEC;
		lck_mtx_lock(newnfsd_mtx);
		TAILQ_INSERT_TAIL(&newnfscb_sockhead, slp, ns_chain);
	}

	sock_retain(so); /* grab a retain count on the socket */
	slp->ns_so = so;
	slp->ns_sotype = sotype;
	slp->ns_nam = mynam;

	/* Install the callback receive upcall. */
	socket_lock((struct socket *)so, 1);
	((struct socket *)so)->so_upcallarg = (caddr_t)slp;
	((struct socket *)so)->so_upcall = nfsrvd_cbrcv;
	((struct socket *)so)->so_rcv.sb_flags |= SB_UPCALL; /* required for freebsd merge */
	socket_unlock((struct socket *)so, 1);

	slp->ns_flag = SLP_VALID | SLP_NEEDQ;

	nfsrvd_wakenfscbd(slp);
	lck_mtx_unlock(newnfsd_mtx);

	return (0);
}
#endif	/* NFSCL */

/*
 * This function needs to test to see if the system is near its limit
 * for memory allocation via malloc() or mget() and return True iff
 * either of these resources are near their limit.
 * XXX (For now, this is just a stub.)
 */
/*
 * Report whether malloc()/mget() resources are near their limit.
 * (Still a stub: with nfsrv_testmalloclimit set, it pretends to hit
 * the limit once every 1000 calls, logging every 100th such event.)
 */
APPLESTATIC int
nfsrv_mallocmget_limit(void)
{
	static int printmesg = 0;
	static int testval = 1;

	if (!nfsrv_testmalloclimit)
		return (0);
	if ((testval++ % 1000) != 0)
		return (0);
	if ((printmesg++ % 100) == 0)
		printf("nfsd: malloc/mget near limit\n");
	return (1);
}

/*
 * newnfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls newnfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
APPLESTATIC int
newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
    struct nfsclient *clp, struct nfssockreq *nrp, struct vnode *vp,
    NFSPROC_T *p, struct ucred *cred, u_int32_t prog, u_int32_t vers,
    u_char *retsum, int toplevel, u_int64_t *xidp)
{
	mbuf_t m;
	struct nfsreq *rep;
	u_int32_t *tl;
	int i, j;
	u_char *cp;
	mbuf_t m2;
	time_t waituntil;
	struct timeval now;
	int error = 0, verf_type, trycnt, connrequired;
	u_int trylater_delay = 1;
	u_int32_t seqnum;

	if (xidp != NULL)
		*xidp = 0;

	/*
	 * If nmp != NULL, check the mount status. nmp == NULL is now a
	 * normal occurrence, meaning a server->client callback or upcall
	 * to a userland daemon.
	 */
	if (nmp != NULL && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_TIMEO)) ==
	    (NFSSTA_FORCE | NFSSTA_TIMEO)) {
		mbuf_free(nd->nd_mreq);
		return (ENXIO);
	}

	/*
	 * Get the RPC header with authorization.
	 */
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSDREQ,
		M_WAITOK);
	SLIST_INIT(&rep->r_seqlist);
	if (nrp->nr_sotype == SOCK_STREAM)
		rep->r_flags = R_TCP;
	else
		rep->r_flags = 0;
	/*
	 * For a client side mount, nmp is != NULL and clp == NULL. For
	 * server calls (callbacks or upcalls), nmp == NULL.
	 */
	if (clp != NULL && (clp->lc_flags & LCL_GSS)) {
		if (toplevel)
			rep->r_flags |= R_TOPLEVEL;
		rep->r_flags |= R_KERBV;
		nd->nd_flag |= ND_KERBV;
		if (nd->nd_procnum != NFSPROC_NULL) {
			if (clp->lc_flags & LCL_GSSINTEGRITY)
				rep->r_flags |= R_INTEGRITY;
			else if (clp->lc_flags & LCL_GSSPRIVACY)
				rep->r_flags |= R_PRIVACY;
		}
	} else if (nmp != NULL && (nmp->nm_flag & NFSMNT_KERB)) {
		if (toplevel)
			rep->r_flags |= R_TOPLEVEL;
		rep->r_flags |= R_KERBV;
		nd->nd_flag |= ND_KERBV;
		if (nmp->nm_flag & NFSMNT_ALLGSSNAME)
			nd->nd_flag |= ND_USEGSSNAME;
		if (nd->nd_flag & ND_USEGSSNAME)
			rep->r_flags |= R_USEGSSNAME;
		if (nd->nd_procnum != NFSPROC_NULL) {
			if (nmp->nm_flag & NFSMNT_INTEGRITY)
				rep->r_flags |= R_INTEGRITY;
			else if (nmp->nm_flag & NFSMNT_PRIVACY)
				rep->r_flags |= R_PRIVACY;
		}
	}
	if (nmp != NULL)
		rep->r_flags |= R_CLIENTSIDE;
	rep->r_nmp = nmp;
	rep->r_clp = clp;
	rep->r_nrp = nrp;
	rep->r_procp = p;
	rep->r_handp = NULL;
	rep->r_handb = NULL;
	if (cred != NULL)
		rep->r_uid = cred->cr_uid;
	rep->r_procnum = nd->nd_procnum;
	if ((nd->nd_flag & ND_NFSV4) &&
	    nd->nd_procnum != NFSV4PROC_CBNULL &&
	    nd->nd_procnum != NFSV4PROC_CBCOMPOUND)
		rep->r_procnum = NFSV4PROC_COMPOUND;
	rep->r_prog = prog;
	rep->r_vers = vers;
	if (nmp != NULL) {
		microuptime(&now);
		rep->r_lastmsg = now.tv_sec -
		    ((nmp->nm_tprintf_delay)-(nmp->nm_tprintf_initial_delay));
	}
	m = nd->nd_mreq;
	if (m)
		i = mbuf_len(m);
	else
		i = 0;
	while (mbuf_next(m)) {
		m = mbuf_next(m);
		i += mbuf_len(m);
	}
	rep->r_mrestend = m;
	rep->r_mrestlen = i;
	rep->r_mrest = nd->nd_mreq;
DEBUG1PRINTF("request mrest=%d\n",i);
	if (rep->r_flags & R_PRIVACY) {
		/*
		 * Paranoia: These should already be correctly set, but...
		 */
		nd->nd_mb = m;
		nd->nd_bpos = ((u_int8_t *)mbuf_data(m)) + mbuf_len(m);
		/*
		 * Build a pad trailer.
		 */
		i = 8 - ((i + 3 * NFSX_UNSIGNED) % 8);
		NFSM_BUILD(cp, u_char *, i);
		for (j = 0; j < i; j++)
			*cp++ = (u_char)i;
		rep->r_mrestlen += i;
DEBUG1PRINTF("aft pad=%d rep->r_mrestlen=%d\n",i,rep->r_mrestlen);
		i = nfsgss_lengthtok(nd, rep->r_mrestlen + 3 * NFSX_UNSIGNED);
		i = NFSM_RNDUP(i) - i;
		if (i > 0) {
DEBUG1PRINTF("priv xdrpad i=%d\n",i);
			NFSM_BUILD(cp, u_char *, i);
			for (j = 0; j < i; j++)
				*cp++ = 0x0;
		}
		rep->r_mrestend = nd->nd_mb;
	}

	trycnt = 0;
tryagain:
	newnfs_rpchead(cred, rep);
	if (xidp != NULL)
		/*
		 * I have no idea what Darwin uses this for, since the
		 * order in which RPCs are generated for a server does
		 * not imply that the RPCs will be performed in that order
		 * at the server, but here it is.
		 */
		*xidp = (((u_int64_t)newnfs_xidwrap) << 32) |
		    ((u_int64_t)fxdr_unsigned(u_int32_t, rep->r_xid));
	if (rep->r_flags & (R_AUTHERR | R_SOFTTERM)) {
		if ((rep->r_flags & R_TOPLEVEL) && rep->r_handp != NULL)
			nfsgsscl_releasehandle(rep, 0);
		mbuf_freem(rep->r_mrest);
		nfsreq_free(rep);
		if (rep->r_flags & R_AUTHERR)
			return (EAUTH);
		return (EINTR);
	}

	/*
	 * and away we go, sending 1, 2, 3...
	 */
	if (nmp == NULL) {
		/*
		 * Three cases:
		 * - Null RPC callback to client
		 * - Non-Null RPC callback to client, wait a little longer
		 * - upcalls to nfsuserd and gssd (clp == NULL)
		 */
		if (clp == NULL) {
			rep->r_retry = NFSV4_UPCALLRETRY;
		} else {
			if (rep->r_procnum != NFSPROC_NULL)
				rep->r_retry = NFSV4_CALLBACKRETRY * 3;
			else
				rep->r_retry = NFSV4_CALLBACKRETRY;
		}
	} else if (nmp->nm_flag & NFSMNT_SOFT) {
		rep->r_retry = nmp->nm_retry;
	} else {
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	}
	rep->r_rtt = rep->r_rexmit = 0;
	if (nmp && proct[rep->r_procnum] > 0)
		rep->r_flags |= R_TIMING;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	if (nmp != NULL)
		OSAddAtomic(1, (SInt32 *)&newnfsstats.rpcrequests);
	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 */
DEBUG2PRINTF("newnfs Req atQ err=%d\n",error);
	lck_mtx_lock(nfs_req_slock);
	TAILQ_INSERT_TAIL(&nfsd_reqq, rep, r_chain);
	lck_mtx_unlock(nfs_req_slock);

	/*
	 * From here until the error return after nfs_reply(),
	 * r_nmp should be used, since it can be cleared by
	 * nfs_unmount() when the request is on the queue.
	 */
	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 */
	if (nrp->nr_so && (nrp->nr_sotype != SOCK_DGRAM || rep->r_nmp == NULL ||
		(rep->r_nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		rep->r_nmp->nm_sent < rep->r_nmp->nm_cwnd)) {
		connrequired = (nrp->nr_sotype == SOCK_STREAM);
DEBUG2PRINTF("at send connreq=%d\n",connrequired);
		if (connrequired)
			error = newnfs_sndlock(&nrp->nr_lock, rep);
		if (!error) {
			if ((rep->r_flags & R_MUSTRESEND) == 0) {
				if (rep->r_nmp != NULL)
					rep->r_nmp->nm_sent += NFS_CWNDSCALE;
				lck_mtx_lock(nfs_req_slock);
				rep->r_flags |= R_SENT;
				lck_mtx_unlock(nfs_req_slock);
			}
			error = mbuf_copym(rep->r_mreq, 0, MBUF_COPYALL,
			    M_WAITOK, &m2);
			if (!error)
				error = newnfs_send((socket_t)nrp->nr_so, nrp->nr_nam,
				    m2, rep, p);
			if (connrequired)
				newnfs_sndunlock(&nrp->nr_lock);
		}
if (error) printf("send err=%d\n",error);
DEBUG2PRINTF("aft send err=%d\n",error);
		if (error) {
			if (rep->r_nmp != NULL)
				rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
			lck_mtx_lock(nfs_req_slock);
			rep->r_flags &= ~R_SENT;
			lck_mtx_unlock(nfs_req_slock);
		}
	} else {
		rep->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (error == 0 || error == EPIPE)
		error = nfs_reply(rep, nd, cred, p);

	/*
	 * RPC done, unlink the request.
	 */
	nfs_repdequeue(rep);

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;	/* paranoia */
		if (rep->r_nmp != NULL)
			rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
	}

	/*
	 * Update seqnum window.
	 */
	if (rep->r_flags & R_GSSGOTSEQ) {
		rep->r_flags &= ~R_GSSGOTSEQ;
		nfsgss_rcvdclseq(rep);
	}

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (rep->r_nmp != NULL && error == 0 && (rep->r_flags & R_TPRINTFMSG))
		nfs_up(rep->r_nmp, rep->r_procp, NFSSTA_TIMEO,
		    "is alive again");
DEBUG2PRINTF("newnfs_request at dissect err=%d\n",error);
	if (error) {
		if (rep->r_flags & R_TOPLEVEL)
			nfsgsscl_releasehandle(rep, 0);
		mbuf_freem(rep->r_mreq);
		nfsreq_free(rep);
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
	if (*tl++ == newrpc_msgdenied) {
DEBUG2PRINTF("msg denied\n");
		if (*tl == newrpc_mismatch) {
			error = EOPNOTSUPP;
		} else if (*tl == newrpc_autherr) {
			i = fxdr_unsigned(int, *++tl);
DEBUG1PRINTF("autherr=%d try=%d\n",i,trycnt);
			if ((i == AUTH_PROBCRED || i == AUTH_CTXCRED) &&
			    (rep->r_flags & R_TOPLEVEL) && ++trycnt < 4) {
				/*
				 * Try again after discarding failed handle.
				 */
				if ((rep->r_flags & R_PRIVACY) &&
				    rep->r_procnum != NFSPROC_NULL)
				    (void) nfsgss_des(rep->r_startm,
					rep->r_startpos, rep->r_mrestlen +
					3 * NFSX_UNSIGNED, rep->r_key, 0);
				mbuf_freem(rep->r_mrep);
				mbuf_setnext(rep->r_mheadend, NULL);
				mbuf_freem(rep->r_mreq);
				if (mbuf_next(rep->r_mrestend)) {
					mbuf_freem(mbuf_next(rep->r_mrestend));
					mbuf_setnext(rep->r_mrestend, NULL);
				}
				nd->nd_repstat = 0;
				nfsgsscl_zaphandle(rep);
				nfsgsscl_releasehandle(rep, 0);
				rep->r_handp = NULL;
DEBUG1PRINTF("try again\n");
				goto tryagain;
			}
			error = EAUTH;
		} else {
			error = EACCES;
		}
		if (rep->r_flags & R_TOPLEVEL)
			nfsgsscl_releasehandle(rep, 0);
		mbuf_freem(rep->r_mrep);
		mbuf_freem(rep->r_mreq);
		nfsreq_free(rep);
DEBUG2PRINTF("req msg denied val=0x%x err=%d\n",*tl,error);
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int, *tl);
	if (verf_type == RPCAUTH_GSS) {
		error = nfsgss_verf(nd, rep->r_handp->nfsh_sched,
		    rep->r_seqnum, rep, i, retsum);
		/*
		 * AUTH_PROBCRED indicates that the verifier has failed.
		 * For UDP, this can be the result of an old seq# that
		 * wasn't kept, so set NFSERR_DELAY to get it to try again.
		 */
		if (error == AUTH_PROBCRED && (rep->r_flags & R_TCP) == 0) {
			nd->nd_repstat = NFSERR_DELAY;
			error = 0;
		}
	} else if (i > 0) {
		error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
	}
	if (error) {
DEBUG1PRINTF("reply err=%d\n",error);
		goto nfsmout;
	}
	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
DEBUG2PRINTF("req rep ok ndflag=0x%x proc=%d\n",nd->nd_flag,nd->nd_procnum);
	    if (nd->nd_procnum != NFSPROC_NULL) {
		/*
		 * Any nfs proc except Null.
		 */
		if (rep->r_flags & R_INTEGRITY) {
DEBUG1PRINTF("reply integ\n");
		    NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		    j = fxdr_unsigned(int, *tl);
		    if (j < NFSX_UNSIGNED) {
			error = EBADRPC;
			goto nfsmout;
		    }
		    nd->nd_startm = nd->nd_md;
		    nd->nd_startpos = nd->nd_dpos;
		    error = nfsm_advance(nd, j, -1);
DEBUG1PRINTF("adv err=%d\n",error);
		    if (error)
			goto nfsmout;
		    NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		    i = fxdr_unsigned(int, *tl);
		    i = nfsgss_ckheader(nd, i, j, 1, rep->r_handp,
			rep->r_handp->nfsh_sched);
		    if (i) {
DEBUG1PRINTF("ckhdr failed=%d\n",i);
			error = EAUTH;
			goto nfsmout;
		    }
		    nd->nd_md = nd->nd_startm;
		    nd->nd_dpos = nd->nd_startpos;
DEBUG1PRINTF("eo reply integ\n");
		} else if (rep->r_flags & R_PRIVACY) {
DEBUG1PRINTF("reply priv\n");
		    NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		    j = fxdr_unsigned(int, *tl);
DEBUG1PRINTF("priv datalen=%d\n",j);
		    if (j < GSSX_MINWRAP) {
			error = EBADRPC;
			goto nfsmout;
		    }
		    i = nfsgss_unwrap(nd, j, 1, rep->r_handp->nfsh_key,
			rep->r_handp->nfsh_sched);
DEBUG1PRINTF("aft unwrap ret=%d\n",i);
		    if (i) {
			error = EAUTH;
			goto nfsmout;
		    }
		}
		if (rep->r_flags & (R_INTEGRITY | R_PRIVACY)) {
		    NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		    seqnum = fxdr_unsigned(u_int32_t, *tl);
DEBUG1PRINTF("rep seq=%d fseq=%d\n",seqnum,rep->r_seqnum);
		    if (seqnum != rep->r_seqnum) {
DEBUG1PRINTF("bad seqnum\n");
			if ((rep->r_flags & R_TCP) == 0) {
			    nd->nd_repstat = NFSERR_DELAY;
			} else {
			    error = EAUTH;
			    goto nfsmout;
			}
		    }
		}

		/*
		 * and now the actual NFS xdr.
		 */
		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		if (nd->nd_repstat != NFSERR_DELAY)
			nd->nd_repstat = fxdr_unsigned(u_int32_t, *tl);
		if (nd->nd_repstat != 0) {
			if ((nd->nd_repstat == NFSERR_DELAY &&
			     (nd->nd_flag & ND_NFSV4) &&
			     nd->nd_procnum != NFSPROC_READ &&
			     nd->nd_procnum != NFSPROC_WRITE &&
			     nd->nd_procnum != NFSPROC_SETATTR &&
			     nd->nd_procnum != NFSPROC_OPEN &&
			     nd->nd_procnum != NFSPROC_CREATE &&
			     nd->nd_procnum != NFSPROC_OPENCONFIRM &&
			     nd->nd_procnum != NFSPROC_OPENDOWNGRADE &&
			     nd->nd_procnum != NFSPROC_CLOSE &&
			     nd->nd_procnum != NFSPROC_LOCK &&
			     nd->nd_procnum != NFSPROC_LOCKU) ||
			    (nd->nd_repstat == NFSERR_DELAY &&
			     (nd->nd_flag & ND_NFSV4) == 0) ||
			    nd->nd_repstat == NFSERR_RESOURCE) {
				if (rep->r_flags & R_TOPLEVEL) {
					nfsgsscl_releasehandle(rep, 0);
					rep->r_handp = NULL;
				}
				if ((rep->r_flags & R_PRIVACY) &&
				    rep->r_procnum != NFSPROC_NULL)
				    (void) nfsgss_des(rep->r_startm,
					rep->r_startpos, rep->r_mrestlen +
					3 * NFSX_UNSIGNED, rep->r_key, 0);
				mbuf_freem(rep->r_mrep);
				mbuf_setnext(rep->r_mheadend, NULL);
				mbuf_freem(rep->r_mreq);
				if (mbuf_next(rep->r_mrestend)) {
					mbuf_freem(mbuf_next(rep->r_mrestend));
					mbuf_setnext(rep->r_mrestend, NULL);
				}
				nd->nd_repstat = 0;
				if (trylater_delay > NFS_TRYLATERDEL)
					trylater_delay = NFS_TRYLATERDEL;
				waituntil = NFSD_MONOSEC + trylater_delay;
				while (NFSD_MONOSEC < waituntil)
					(void) tsleep((caddr_t)&lbolt,
						PSOCK, "nfstry", 0);
				trylater_delay *= 2;
DEBUG2PRINTF("try again2\n");
				goto tryagain;
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 */
			if (nd->nd_repstat == ESTALE && vp)
				cache_purge(vp);
		}

		/*
		 * Get rid of the tag, return count, and PUTFH result for V4.
		 */
		if (nd->nd_flag & ND_NFSV4) {
			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
			i = fxdr_unsigned(int, *tl);
			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
			if (error)
				goto nfsmout;
			NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			i = fxdr_unsigned(int, *++tl);

			/*
			 * If the first op's status is non-zero, mark that
			 * there is no more data to process.
			 */
			if (*++tl)
				nd->nd_flag |= ND_NOMOREDATA;

			/*
			 * If the first op is Putfh, throw its results away
			 * and toss the op# and status for the first op.
			 */
			if (nmp && i == NFSV4OP_PUTFH && !(*tl)) {
				NFSM_DISSECT(tl,u_int32_t *,2 * NFSX_UNSIGNED);
				i = fxdr_unsigned(int, *tl++);
				j = fxdr_unsigned(int, *tl);
				/*
				 * All Compounds that do an Op that must
				 * be in sequence consist of NFSV4OP_PUTFH
				 * followed by one of these. As such, we
				 * can determine if the seqid# should be
				 * incremented, here.
				 */
				if ((i == NFSV4OP_OPEN ||
				     i == NFSV4OP_OPENCONFIRM ||
				     i == NFSV4OP_OPENDOWNGRADE ||
				     i == NFSV4OP_CLOSE ||
				     i == NFSV4OP_LOCK ||
				     i == NFSV4OP_LOCKU) &&
				    (j == 0 ||
				     (j != NFSERR_STALECLIENTID &&
				      j != NFSERR_STALESTATEID &&
				      j != NFSERR_BADSTATEID &&
				      j != NFSERR_BADSEQID &&
				      j != NFSERR_BADXDR &&	 
				      j != NFSERR_RESOURCE &&
				      j != NFSERR_NOFILEHANDLE)))		 
					nd->nd_flag |= ND_INCRSEQID;
				/*
				 * If the first op's status is non-zero, mark
				 * that there is no more data to process.
				 */
				if (j)
					nd->nd_flag |= ND_NOMOREDATA;
			}

			/*
			 * If R_DONTRECOVER is set, replace the stale error
			 * reply, so that recovery isn't initiated.
			 */
			if ((nd->nd_repstat == NFSERR_STALECLIENTID ||
			     nd->nd_repstat == NFSERR_STALESTATEID) &&
			    (rep->r_flags & R_DONTRECOVER))
				nd->nd_repstat = NFSERR_STALEDONTRECOVER;
		}
	    }

	    if (rep->r_flags & R_TOPLEVEL)
		nfsgsscl_releasehandle(rep, 0);
	    mbuf_freem(rep->r_mreq);
	    nfsreq_free(rep);
	    return (0);
	}
DEBUG1PRINTF("accept stat=%d\n",fxdr_unsigned(int, *tl));
	error = EPROTONOSUPPORT;
nfsmout:
	if (rep->r_flags & R_TOPLEVEL)
		nfsgsscl_releasehandle(rep, 0);
	mbuf_freem(rep->r_mrep);
	mbuf_freem(rep->r_mreq);
	nfsreq_free(rep);
	return (error);
}

/*
 * From FreeBSD 1.58, a Matt Dillon fix...
 * Flag a request as being about to terminate.
 * The nm_sent count is decremented now to avoid deadlocks when the process
 * in soreceive() hasn't yet managed to send its own request.
 */
static void
nfs_softterm(struct nfsreq *rep)
{

	rep->r_flags |= R_SOFTTERM;
	if (rep->r_flags & R_SENT) {
		if (rep->r_nmp != NULL)
			rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
		rep->r_flags &= ~R_SENT;
	}
}

/*
 * Ensure rep isn't in use by the timer, then dequeue it.
 */
static void
nfs_repdequeue(struct nfsreq *rep)
{

	lck_mtx_lock(nfs_req_slock);
	/*
	 * The nfs timer may have this request busied (R_BUSY); wait until
	 * it is released before dequeueing, so the timer never walks a
	 * request that has been removed out from under it.
	 */
	while ((rep->r_flags & R_BUSY)) {
		/* Ask nfs_repnext() to wake us when R_BUSY is cleared. */
		rep->r_flags |= R_WAITING;
		(void) msleep(rep, nfs_req_slock, PSOCK, "repdeq", 0);
	}
	TAILQ_REMOVE(&nfsd_reqq, rep, r_chain);
	lck_mtx_unlock(nfs_req_slock);
}

/*
 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
 * free()'d out from under it.
 */
static void
nfs_repbusy(struct nfsreq *rep)
{

	/*
	 * Caller holds nfs_req_slock.  Double-busying would mean two
	 * concurrent walkers over the same request, so treat it as fatal.
	 */
	if ((rep->r_flags & R_BUSY))
		panic("rep locked");
	rep->r_flags |= R_BUSY;
}

/*
 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
 */
static struct nfsreq *
nfs_repnext(struct nfsreq *rep)
{
	struct nfsreq * nextrep;

	if (rep == NULL)
		return (NULL);
	/*
	 * We need to get and busy the next req before signalling the
	 * current one, otherwise wakeup() may block us and we'll race to
	 * grab the next req.
	 */
	lck_mtx_lock(nfs_req_slock);
	nextrep = TAILQ_NEXT(rep, r_chain);
	if (nextrep != NULL)
		nfs_repbusy(nextrep);
	/* unbusy and signal. */
	rep->r_flags &= ~R_BUSY;
	/* Wake anyone parked in nfs_repdequeue() waiting on this req. */
	if ((rep->r_flags & R_WAITING)) {
		rep->r_flags &= ~R_WAITING;
		wakeup(rep);
	}
	lck_mtx_unlock(nfs_req_slock);
	return (nextrep);
}

/*
 * Nfs timer routine
 * Scan the nfsreq list and retranmit any requests that have timed out
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
newnfs_timer(__unused void *arg)
{
	struct nfsreq *rep;
	mbuf_t m;
	socket_t so;
	struct nfsmount *nmp;
	struct nfssockreq *nrp;
	int timeo;
	int error;
	int flags, rexmit, cwnd = 0, sent = 0;
	u_long xid;
	struct timeval now;
	struct nfssvc_sock *slp, *nslp;
	static time_t lasttime = 0;

	/*
	 * Walk the request queue with each entry busied (R_BUSY) so it
	 * cannot be freed out from under us; nfs_repnext() unbusies the
	 * current entry and busies the next one.
	 */
	lck_mtx_lock(nfs_req_slock);
	rep = TAILQ_FIRST(&nfsd_reqq);
	if (rep != NULL)
		nfs_repbusy(rep);
	lck_mtx_unlock(nfs_req_slock);
	microuptime(&now);
	for ( ; rep != NULL ; rep = nfs_repnext(rep)) {
		nmp = rep->r_nmp;
		nrp = rep->r_nrp;
		if (rep->r_flags & R_UNMOUNT) /* unmounted */
			continue;
		/* Skip requests that already have a reply or are dying. */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (newnfs_sigintr(nmp, rep, rep->r_procp)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		/*
		 * Report an unresponsive server, at most once per
		 * nm_tprintf_delay seconds.
		 */
		if (nmp != NULL && nmp->nm_tprintf_initial_delay != 0 &&
		    (rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) &&
		    rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
			rep->r_lastmsg = now.tv_sec;
			nfs_down(rep->r_nmp, rep->r_procp, 0, NFSSTA_TIMEO,
				"not responding");
			rep->r_flags |= R_TPRINTFMSG;
			if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				OSAddAtomic(1, (SInt32*)&newnfsstats.rpctimeouts);
				nfs_softterm(rep);
				continue;
			}
		}
		/* r_rtt >= 0 means a reply timer is running for this req. */
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp == NULL) {
				/* Upcall (no r_clp) or v4 callback request. */
				if (rep->r_clp == NULL)
					timeo = NFSV4_UPCALLTIMEO;
				else
					timeo = NFSV4_CALLBACKTIMEO;
			} else if (nrp->nr_sotype != SOCK_DGRAM) {
				timeo = NFS_TCPTIMEO;
			} else {
				if (nmp->nm_flag & NFSMNT_DUMBTIMR)
					timeo = nmp->nm_timeo;
				else
					timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
				/* ensure 62.5 ms floor */
				while (16 * timeo < hz)
					timeo *= 2;
				/* Back off further after consecutive timeouts. */
				if (nmp->nm_timeouts > 0)
					timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			}
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp != NULL && nmp->nm_timeouts < 8)
				nmp->nm_timeouts++;
		}
		/*
		 * Check for too many retransmits.  This is never true for
		 * 'hard' mounts because we set r_retry to NFS_MAXREXMIT + 1
		 * and never allow r_rexmit to be more than NFS_MAXREXMIT.
		 */
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			OSAddAtomic(1, (SInt32*)&newnfsstats.rpctimeouts);
			nfs_softterm(rep);
			continue;
		}
		/*
		 * For callbacks and NFS V4 over reliable transport, never
		 * do retries.
		 * (I felt this was correct for NFS over TCP, but some servers
		 *  do throw away requests, so a long timeout/retransmit is
		 *  required for V2,3 NFS over TCP.)
		 */
		if (nrp->nr_sotype != SOCK_DGRAM &&
		    (nmp == NULL || (nmp->nm_flag & NFSMNT_NFSV4))) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = (socket_t)rep->r_nrp->nr_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if ((nmp == NULL || (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		     (rep->r_flags & R_SENT) ||
		     (nrp->nr_sotype != SOCK_DGRAM) ||
		     nmp->nm_sent < nmp->nm_cwnd) &&
		    (mbuf_copym(rep->r_mreq, 0, MBUF_COPYALL, MBUF_DONTWAIT, &m)
		     == 0)) {
			struct msghdr	msg;
			/*
			 * Iff first send, start timing
			 * else turn timing off, backoff timer
			 * and divide congestion window by 2.
			 * We update these *before* the send to avoid
			 * racing against receiving the reply.
			 * We save them so we can restore them on send error.
			 */
			flags = rep->r_flags;
			rexmit = rep->r_rexmit;
			if (nmp != NULL) {
				cwnd = nmp->nm_cwnd;
				sent = nmp->nm_sent;
			}
			xid = rep->r_xid;
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_TIMING;
				if (++rep->r_rexmit > NFS_MAXREXMIT)
					rep->r_rexmit = NFS_MAXREXMIT;
				if (nmp != NULL) {
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
				}
				OSAddAtomic(1, (SInt32*)&newnfsstats.rpcretries);
			} else {
				rep->r_flags |= R_SENT;
				if (nmp != NULL)
					nmp->nm_sent += NFS_CWNDSCALE;
			}

	 		bzero(&msg, sizeof(msg));
			/*
			 * NOTE(review): msg_namelen is sizeof the nr_nam
			 * field itself (a pointer-sized member?) - confirm
			 * this is what sock_sendmbuf() expects for an
			 * unconnected socket.
			 */
			if ((((struct socket *)so)->so_state & SS_ISCONNECTED) == 0) {
				msg.msg_name = nrp->nr_nam;
				msg.msg_namelen = sizeof (nrp->nr_nam);
			}
			error = sock_sendmbuf(so, &msg, m, MSG_DONTWAIT, NULL);


			if (error) {
				/*
				 * EWOULDBLOCK: restore the saved state so the
				 * resend is retried later as if nothing
				 * happened.  Other errors also restore state
				 * but mark the request R_RESENDERR.
				 */
				if (error == EWOULDBLOCK) {
					rep->r_flags = flags;
					rep->r_rexmit = rexmit;
					if (nmp != NULL) {
						nmp->nm_cwnd = cwnd;
						nmp->nm_sent = sent;
					}
					rep->r_xid = xid;
				}
				else {
					if (NFSIGNORE_SOERROR(nrp->nr_sotype, error)) {
						int clearerror;
						int optlen = sizeof(clearerror);
						sock_getsockopt((socket_t)nrp->nr_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
					}
					rep->r_flags  = flags | R_RESENDERR;
					rep->r_rexmit = rexmit;
					if (nmp != NULL) {
						nmp->nm_cwnd = cwnd;
						nmp->nm_sent = sent;
					}
					if (flags & R_SENT)
						OSAddAtomic(-1, (SInt32*)&newnfsstats.rpcretries);
				}
			} else
				rep->r_rtt = 0;
		}
	}
	microuptime(&now);

	if (nclbuffreeuptimestamp + 30 <= now.tv_sec) {
		/*
		 * We haven't called ncl_buf_freeup() in a little while.
		 * So, see if we can free up any stale/unused bufs now.
		 */
		ncl_buf_freeup(1);
	}

	/*
	 * Call the server timer.
	 * The argument indicates if it is the next second and therefore
	 * leases should be checked.
	 */
	if (lasttime != NFSD_MONOSEC) {
		lasttime = NFSD_MONOSEC;
#if defined(NFSD)
		nfsrv_servertimer();
#endif /* NFSD */
		/* Reap dead server/callback sockets past their grace period. */
		NFSD_LOCK();
		slp = TAILQ_FIRST(&newnfs_deadsockhead);
		while (slp != NULL) {
			nslp = TAILQ_NEXT(slp, ns_chain);
			if (slp->ns_timestamp >= NFSD_MONOSEC)
				break;
			TAILQ_REMOVE(&newnfs_deadsockhead, slp, ns_chain);
			nfsrvd_slpfree(slp);
			slp = nslp;
		}
		slp = TAILQ_FIRST(&newnfscb_deadsockhead);
		while (slp != NULL) {
			nslp = TAILQ_NEXT(slp, ns_chain);
			if (slp->ns_timestamp >= NFSD_MONOSEC)
				break;
			TAILQ_REMOVE(&newnfscb_deadsockhead, slp, ns_chain);
			nfsrvd_slpfree(slp);
			slp = nslp;
		}
		NFSD_UNLOCK();
	}
	/* Re-arm the timer. */
	timeout(newnfs_timer_funnel, (void *)0, nfscl_ticks);

}

/*
 * not yet
 */
APPLESTATIC int
nfsrv_atroot(__unused vnode_t vp, __unused long *intp)
{
	/* Unimplemented on this port; no vnode is ever reported at root. */
	return (0);
}

/*
 * Not yet
 */
APPLESTATIC struct nfsreferral *
nfsv4root_getreferral(__unused vnode_t vp, __unused vnode_t vp2, __unused u_int32_t i)
{
	/* Referrals are unimplemented on this port; none ever exists. */
	return (NULL);
}

/*
 * Copy the NFS uid and gid list out of an nfscred into a cred structure.
 * The group count is clamped to NGROUPS so the fixed-size cr_groups
 * array is never overrun.
 */
APPLESTATIC void
newnfs_copycred(struct nfscred *nfscr, struct ucred *cr)
{
	int ngroups, i;

	cr->cr_uid = nfscr->nfsc_uid;
	ngroups = (nfscr->nfsc_ngroups < NGROUPS) ?
	    nfscr->nfsc_ngroups : NGROUPS;
	for (i = 0; i < ngroups; i++)
		cr->cr_groups[i] = nfscr->nfsc_groups[i];
	cr->cr_ngroups = ngroups;
}

/*
 * Copy the NFS uid and gid list from a cred structure into an nfscred.
 * The group count is clamped to NGROUPS so the fixed-size nfsc_groups
 * array is never overrun.
 */
APPLESTATIC void
newnfs_copyincred(struct ucred *cr, struct nfscred *nfscr)
{
	int ngroups, i;

	nfscr->nfsc_uid = cr->cr_uid;
	ngroups = (cr->cr_ngroups > NGROUPS) ? NGROUPS :
	    cr->cr_ngroups;
	for (i = 0; i < ngroups; i++)
		nfscr->nfsc_groups[i] = cr->cr_groups[i];
	nfscr->nfsc_ngroups = ngroups;
}

APPLESTATIC int
nfsm_mbufuio(struct nfsrv_descript *nd, struct uio *uiop, int siz)
{
	char *mbufcp, *uiocp;
	int xfer, left, len;
	mbuf_t mp;
	long uiosiz, rem;
	int error = 0;

	/*
	 * Copy "siz" bytes of XDR data from the mbuf chain at
	 * nd->nd_md/nd->nd_dpos into the uio, advancing both cursors.
	 * On success nd_md/nd_dpos point just past the data plus any
	 * XDR round-up padding.  Returns EFBIG if the uio runs out of
	 * iovecs, EBADRPC if the mbuf chain runs out of data.
	 */
	mp = nd->nd_md;
	mbufcp = nd->nd_dpos;
	/* Bytes left in the current mbuf after the parse position. */
	len = (caddr_t)mbuf_data(mp) + mbuf_len(mp) - mbufcp;
	/* XDR pads opaque data to a 4-byte multiple; skipped at the end. */
	rem = NFSM_RNDUP(siz) - siz;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iovs.iov32p == NULL)
			return (EFBIG);
		// LP64todo - fix this!
		left = uio_iov_len(uiop);
		uiocp = CAST_DOWN(caddr_t, uio_iov_base(uiop));
		/* Copy at most "siz" bytes into the current iovec. */
		if (left > siz)
			left = siz;
		if (left == 0)
			panic("nfsmbuio");
		uiosiz = left;
		while (left > 0) {
			/* Exhausted this mbuf; step to the next in chain. */
			while (len == 0) {
				mp = mbuf_next(mp);
				if (mp == NULL)
					return (EBADRPC);
				mbufcp = mbuf_data(mp);
				len = mbuf_len(mp);
			}
			xfer = (left > len) ? len : left;
			/*
			 * NOTE(review): the copyout() return value is
			 * ignored, so a fault in the user buffer goes
			 * undetected - confirm this is intended.
			 */
			if (UIO_SEG_IS_USER_SPACE(uiop->uio_segflg))
				copyout(mbufcp, CAST_USER_ADDR_T(uiocp), xfer);
			else
				bcopy(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uio_uio_resid_add(uiop, -xfer);
		}
		/* Iovec fully consumed? Move on; otherwise advance within it. */
		if (uio_iov_len(uiop) <= (size_t)siz) {
			uiop->uio_iovcnt--;
			uio_next_iov(uiop);
		} else {
			uio_iov_base_add(uiop, uiosiz);
			uio_iov_len_add(uiop, -uiosiz);
		}
		siz -= uiosiz;
	}
	nd->nd_dpos = mbufcp;
	nd->nd_md = mp;
	/* Skip the XDR round-up padding, crossing mbufs if necessary. */
	if (rem > 0) {
		if (len < rem)
			error = nfsm_advance(nd, rem, len);
		else
			nd->nd_dpos += rem;
	}
	return (error);
}

/*
 * Dereference a server socket structure. If it has no more references and
 * is no longer valid, queue it for deletion; the timer frees it after a
 * short grace period (see newnfs_timer()).
 */
static void
nfsrv_slpderef(struct nfssvc_sock *slp, int callb)
{

	lck_mtx_lock(newnfsd_mtx);
	lck_rw_lock_exclusive(&slp->ns_rwlock);
	slp->ns_sref--;
	/* Still referenced, or still a live socket: nothing more to do. */
	if (slp->ns_sref || (slp->ns_flag & SLP_VALID)) {
		lck_rw_done(&slp->ns_rwlock);
		lck_mtx_unlock(newnfsd_mtx);
		return;
	}

	/* queue the socket up for deletion */
	/* The 5 second delay lets any transient users drain first. */
	slp->ns_timestamp = NFSD_MONOSEC + 5;
	if (callb) {
		TAILQ_REMOVE(&newnfscb_sockhead, slp, ns_chain);
		TAILQ_INSERT_TAIL(&newnfscb_deadsockhead, slp, ns_chain);
#ifdef NFSD
	} else {
		TAILQ_REMOVE(&newnfs_sockhead, slp, ns_chain);
		TAILQ_INSERT_TAIL(&newnfs_deadsockhead, slp, ns_chain);
#endif
	}
	lck_rw_done(&slp->ns_rwlock);
	/* Clear the global UDP socket pointer if this was it. */
	if (slp == newnfs_cbudpsock)
		newnfs_cbudpsock = NULL;
#ifdef NFSD
	else if (slp == newnfs_udpsock)
		newnfs_udpsock = NULL;
#endif
	lck_mtx_unlock(newnfsd_mtx);
}

/*
 * Handle the NFSv4 callbacks.
 * Only a single nfscbd server thread is supported, but that should be
 * sufficient.
 */
APPLESTATIC int
nfsrvd_nfscbd(NFSPROC_T *p)
{
	mbuf_t m;
	int siz;
	struct nfssvc_sock *slp;
	struct nfsd *nfscbd;
	struct nfsrv_descript *nd;
	struct ucred tcred;
	int error = 0, dorep = RC_DOIT;
	boolean_t funnel_state;

	MALLOC(nfscbd, struct nfsd *, sizeof (struct nfsd), M_NFSD, M_WAITOK);
	NFSBZERO((caddr_t)nfscbd, sizeof (struct nfsd));
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
	    M_NFSNFSDDESC, M_WAITOK);
	/*
	 * The credentials are filled in from the RPC header, but aren't
	 * used for anything at this time, so I don't see a need to
	 * create them properly using kauth_cred_XXX.
	 */
	newnfs_setroot(&tcred);
	nd->nd_cred = &tcred;
	TAILQ_INSERT_TAIL(&newnfscbd_head, nfscbd, nfsd_chain);
	nfs_numnfscbd++;

	funnel_state = thread_funnel_set(kernel_flock, FALSE);
	/*
	 * Loop getting rpc requests until SIGKILL.
	 */
	for (;;) {
		if ((nfscbd->nfsd_flag & NFSD_REQINPROG) == 0) {
			/* Sleep until some socket needs servicing. */
			while (nfscbd->nfsd_slp == NULL &&
			    (newnfscbd_head_flag & NFSD_CHECKSLP) == 0) {
				nfscbd->nfsd_flag |= NFSD_WAITING;
				nfscbd_waiting++;
				error = tsleep((caddr_t)nfscbd, PSOCK | PCATCH,
				    "nfscbd", 0);
				nfscbd_waiting--;
				if (error)
					goto done;
			}
			/* Find a valid socket flagged SLP_DOREC and claim it. */
			if (nfscbd->nfsd_slp == NULL &&
			    (newnfscbd_head_flag & NFSD_CHECKSLP) != 0) {
				TAILQ_FOREACH(slp, &newnfscb_sockhead, ns_chain) {
				    lck_rw_lock_exclusive(&slp->ns_rwlock);
				    if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
					== (SLP_VALID | SLP_DOREC)) {
					    slp->ns_flag &= ~SLP_DOREC;
					    slp->ns_sref++;
					    nfscbd->nfsd_slp = slp;
					    lck_rw_done(&slp->ns_rwlock);
					    break;
				    }
				    lck_rw_done(&slp->ns_rwlock);
				}
				/* Nothing to pick up; clear the hint flag. */
				if (slp == NULL)
					newnfscbd_head_flag &= ~NFSD_CHECKSLP;
			}
			if ((slp = nfscbd->nfsd_slp) == NULL)
				continue;
			lck_rw_lock_exclusive(&slp->ns_rwlock);
			if (slp->ns_flag & SLP_VALID) {
				/* Pull queued data, then try to parse a request. */
				if ((slp->ns_flag & (SLP_NEEDQ|SLP_DISCONN)) == SLP_NEEDQ) {
					slp->ns_flag &= ~SLP_NEEDQ;
					nfsrvd_cbrcv_locked(slp->ns_so, slp, MBUF_WAITOK);
				}
				if (slp->ns_flag & SLP_DISCONN)
					nfsrvd_zapsock(slp);
				error = nfsrvd_dorec(slp, nfscbd, nd, p, 1);
				nfscbd->nfsd_flag |= NFSD_REQINPROG;
			}
			lck_rw_done(&slp->ns_rwlock);
		} else {
			error = 0;
			slp = nfscbd->nfsd_slp;
		}
		/* Drop a dead or errored socket and go look for another. */
		if (error || (slp->ns_flag & SLP_VALID) == 0) {
			nfscbd->nfsd_slp = NULL;
			nfscbd->nfsd_flag &= ~NFSD_REQINPROG;
			nfsrv_slpderef(slp, 1);
			continue;
		}
		if (nd) {
		    if (slp->ns_sotype == SOCK_STREAM)
			nd->nd_flag |= ND_STREAMSOCK;
		    NFSGETTIME(&nd->nd_starttime);
		    if (nd->nd_nam2)
			nd->nd_nam = nd->nd_nam2;
		    else
			nd->nd_nam = slp->ns_nam;

		    /*
		     * Several cases, as set up by nfsrvd_dorec().
		     * 1 - nd_repstat == NFSERR_DONTREPLY, which means
		     *     just silently drop the request
		     * 2 - nd_repstat set to some other error, which
		     *     means that an RPC layer error has occurred,
		     *     so generate the reply now
		     * 3 - nd_mreq not NULL, which means the reply has
		     *     already been created, via nfsgss_continit()
		     * 4 - nd_procnum == NFSPROC_NULL with nd_mreq == NULL,
		     *     is a normal null rpc, so just create reply
		     * 5 - nd_repstat == 0 && nd_mreq == NULL, which
		     *     means a normal nfs rpc, so check the cache
		     */
		    if (nd->nd_repstat == NFSERR_DONTREPLY)
			dorep = RC_DROPIT;
		    else if (nd->nd_repstat || nd->nd_mreq)
			dorep = RC_REPLY;
		    else if (nd->nd_procnum == NFSPROC_NULL) {
			nd->nd_repstat = NFSERR_RETVOID;
			NFSINCRGLOBAL(newnfsstats.cbrpccnt[nd->nd_procnum]);
			dorep = RC_REPLY;
		    } else
			dorep = RC_DOIT;
		}

		/*
		 * Handle the request. There are three cases.
		 * RC_DOIT - do the RPC
		 * RC_REPLY - return the reply generated by nfsrvd_getcache()
		 * RC_DROPIT - just throw the request away
		 */
		if (dorep == RC_DOIT) {
			nfscl_docb(nd, p);
			if (nd->nd_repstat == NFSERR_DONTREPLY) {
				if (nd->nd_mreq)
					mbuf_freem(nd->nd_mreq);
				dorep = RC_DROPIT;
			}
		}
		switch (dorep) {
		    case RC_DOIT:
		    case RC_REPLY:
			nfsrv_replyheader(nd, slp, p);
			/* Total the reply length for the packet header. */
			m = nd->nd_mreq;
			siz = 0;
			while (m) {
				siz += mbuf_len(m);
				m = mbuf_next(m);
			}
DEBUG2PRINTF("reply len=%d\n",siz);
			if (siz <= 0) {
				printf("mbuf siz=%d\n",siz);
				panic("Bad nfscbd reply size");
			} else if (siz > NFS_MAXPACKET) {
				printf("nfs reply siz=%d TOO BIG, ", siz);
				printf("increase NFS_MAXPACKET\n");
			}
			m = nd->nd_mreq;
			mbuf_pkthdr_setlen(m, siz);
			error = mbuf_pkthdr_setrcvif(m, NULL);
			if (error)
				panic("nfsd setrcvif failed: %d", error);
			/*
			 * For stream protocols, prepend a Sun RPC
			 * Record Mark.
			 */
			if (slp->ns_sotype == SOCK_STREAM) {
				error = mbuf_prepend(&m, NFSX_UNSIGNED, MBUF_WAITOK);
				if (!error)
					*(u_long*)mbuf_data(m) = htonl(0x80000000 | siz);
			}
			if (!error) {
				if (slp->ns_flag & SLP_VALID)
				    error = newnfs_send(slp->ns_so, nd->nd_nam2, m, NULL, p);
				else
				    error = EPIPE;
			}
			/* newnfs_send() did not consume m on failure. */
			if (error)
				mbuf_freem(m);
			if (nd->nd_nam2)
				mbuf_freem(nd->nd_nam2);
			if (nd->nd_mrep)
				mbuf_freem(nd->nd_mrep);
			if (error == EPIPE) {
				lck_rw_lock_exclusive(&slp->ns_rwlock);
				nfsrvd_zapsock(slp);
				lck_rw_done(&slp->ns_rwlock);
			}
			if (error == EINTR || error == ERESTART) {
				nfsrv_slpderef(slp, 1);
				goto done;
			}
			break;
		    case RC_DROPIT:
			mbuf_freem(nd->nd_mrep);
			if (nd->nd_nam2)
				mbuf_freem(nd->nd_nam2);
			break;
		};

		/* Any more complete requests on this socket? */
		lck_rw_lock_exclusive(&slp->ns_rwlock);
		if (nfsrvd_dorec(slp, nfscbd, nd, p, 1)) {
			lck_rw_done(&slp->ns_rwlock);
			nfscbd->nfsd_flag &= ~NFSD_REQINPROG;
			nfscbd->nfsd_slp = NULL;
			nfsrv_slpderef(slp, 1);
		} else {
			lck_rw_done(&slp->ns_rwlock);
		}
	}
done:
	TAILQ_REMOVE(&newnfscbd_head, nfscbd, nfsd_chain);
	nfs_numnfscbd--;
	FREE((caddr_t)nd, M_NFSNFSDDESC);
	FREE((caddr_t)nfscbd, M_NFSD);
	return (error);
}

/*
 * initialize NFS's cache of mbuf constants
 */
static void
newncl_mbuf_init(void)
{
	struct mbuf_stat ms;

	mbuf_stats(&ms);
	ncl_mbuf_mlen = ms.mlen;
	ncl_mbuf_mhlen = ms.mhlen;
	ncl_mbuf_minclsize = ms.minclsize;
	ncl_mbuf_mclbytes = ms.mclbytes;
}

/*
 * Search for a sleeping nfscbd and wake it up.
 * SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the
 * running nfscbds will go look for the work in the nfssvc_sock list.
 */
static void
nfsrvd_wakenfscbd(struct nfssvc_sock *slp)
{
	struct nfsd *cbd;

	/* Nothing to do for an invalidated socket. */
	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	/* Hand the socket directly to the first sleeping nfscbd, if any. */
	TAILQ_FOREACH(cbd, &newnfscbd_head, nfsd_chain) {
		if ((cbd->nfsd_flag & NFSD_WAITING) == 0)
			continue;
		cbd->nfsd_flag &= ~NFSD_WAITING;
		if (cbd->nfsd_slp)
			panic("nfsd wakeup");
		slp->ns_sref++;
		cbd->nfsd_slp = slp;
		wakeup((caddr_t)cbd);
		return;
	}
	/*
	 * No sleeper: flag the socket and the daemon list so a running
	 * nfscbd picks the work up on its next pass.
	 */
	slp->ns_flag |= SLP_DOREC;
	newnfscbd_head_flag |= NFSD_CHECKSLP;
}

/*
 * cleanup and release a server socket structure.
 */
APPLESTATIC void
nfsrvd_slpfree(struct nfssvc_sock *slp)
{

	/* Release the socket reference, then any queued mbufs. */
	if (slp->ns_so != NULL) {
		sock_release(slp->ns_so);
		slp->ns_so = NULL;
	}
	if (slp->ns_nam != NULL)
		mbuf_free(slp->ns_nam);		/* single mbuf, not a chain */
	if (slp->ns_raw != NULL)
		mbuf_freem(slp->ns_raw);
	if (slp->ns_rec != NULL)
		mbuf_freem(slp->ns_rec);
	slp->ns_nam = NULL;
	slp->ns_raw = NULL;
	slp->ns_rec = NULL;

	lck_rw_destroy(&slp->ns_rwlock, nfs_slp_rwlock_group);
	FREE(slp, M_NFSSVC);
}

/*
 * Port specific initialization for stuff used by both the client and
 * server. Called early in the nfssvc() call and early in mounting.
 * After the first time, it just returns. Ideally calls to this function
 * are locked by some SMP lock already available to the system, such as
 * Giant for FreeBSD.
 */
APPLESTATIC void
newnfs_portinit(void)
{
	static int inited = 0;

	/* Run-once guard; callers must serialize (see comment above). */
	if (inited)
		return;
	/* Initialize SMP locks used by both client and server. */
	/* GSS context and client-handle mutexes. */
	nfs_gss_lck_grp_attr = lck_grp_attr_alloc_init();
	nfs_gss_lck_grp = lck_grp_alloc_init("nfs_gss", nfs_gss_lck_grp_attr);
	nfs_gss_lck_attr = lck_attr_alloc_init();
	nfs_gss_mutex = lck_mtx_alloc_init(nfs_gss_lck_grp, nfs_gss_lck_attr);
	nfs_gssclhandle_lck_grp_attr = lck_grp_attr_alloc_init();
	nfs_gssclhandle_lck_grp = lck_grp_alloc_init("nfs_gssclhandle", nfs_gssclhandle_lck_grp_attr);
	nfs_gssclhandle_lck_attr = lck_attr_alloc_init();
	nfs_gssclhandle_mutex = lck_mtx_alloc_init(nfs_gssclhandle_lck_grp, nfs_gssclhandle_lck_attr);
	/* Name<->id mapping spin lock. */
	nfs_nameid_lck_grp_attr = lck_grp_attr_alloc_init();
	nfs_nameid_lck_grp = lck_grp_alloc_init("nfs_nameid", nfs_nameid_lck_grp_attr);
	nfs_nameid_lck_attr = lck_attr_alloc_init();
	nfs_nameid_slock = lck_spin_alloc_init(nfs_nameid_lck_grp, nfs_nameid_lck_attr);
	/* Server daemon mutex (protects socket lists, see nfsrv_slpderef). */
	newnfsd_lck_grp_attr = lck_grp_attr_alloc_init();
	newnfsd_lck_grp = lck_grp_alloc_init("newnfsd", newnfsd_lck_grp_attr);
	newnfsd_lck_attr = lck_attr_alloc_init();
	newnfsd_mtx = lck_mtx_alloc_init(newnfsd_lck_grp, newnfsd_lck_attr);
	/* NFSv4 state spin lock. */
	nfs_state_lck_grp_attr = lck_grp_attr_alloc_init();
	nfs_state_lck_grp = lck_grp_alloc_init("nfs_state", nfs_state_lck_grp_attr);
	nfs_state_lck_attr = lck_attr_alloc_init();
	nfs_state_slock = lck_spin_alloc_init(nfs_state_lck_grp, nfs_state_lck_attr);
	/* Socket-level mutex. */
	nfs_sockl_lck_grp_attr = lck_grp_attr_alloc_init();
	nfs_sockl_lck_grp = lck_grp_alloc_init("nfs_sockl", nfs_sockl_lck_grp_attr);
	nfs_sockl_lck_attr = lck_attr_alloc_init();
	nfs_sockl_mutex = lck_mtx_alloc_init(nfs_sockl_lck_grp, nfs_sockl_lck_attr);
	/* Request-queue mutex (protects nfsd_reqq and R_BUSY handshaking). */
	nfs_req_lck_grp_attr = lck_grp_attr_alloc_init();
	nfs_req_lck_grp = lck_grp_alloc_init("nfs_req", nfs_req_lck_grp_attr);
	nfs_req_lck_attr = lck_attr_alloc_init();
	nfs_req_slock = lck_mtx_alloc_init(nfs_req_lck_grp, nfs_req_lck_attr);
	/* Export table rwlock. */
	nfs_export_lock_attr = lck_attr_alloc_init();
	nfs_export_group_attr = lck_grp_attr_alloc_init();
	nfs_export_rwlock_group = lck_grp_alloc_init("newnfs-export-rwlock", nfs_export_group_attr);
	lck_rw_init(&newnfs_export_rwlock, nfs_export_rwlock_group, nfs_export_lock_attr);
	/* Group for the per-server-socket rwlocks (created per slp). */
	nfs_slp_lock_attr = lck_attr_alloc_init();
	nfs_slp_group_attr = lck_grp_attr_alloc_init();
	nfs_slp_rwlock_group = lck_grp_alloc_init("newnfs-slp-rwlock", nfs_slp_group_attr);
	/* Dead-socket queues drained by newnfs_timer(). */
	TAILQ_INIT(&newnfs_deadsockhead);
	TAILQ_INIT(&newnfscb_deadsockhead);
	newnfs_init();
	inited = 1;
}

APPLESTATIC void
nfsvno_getfs(__unused struct nfsfsinfo *fsp, __unused struct nfssvc_sock *slp)
{
	/* Stub: file system info is not filled in on this port. */
}

APPLESTATIC int
nfsvno_v4rootexport(__unused struct nfsrv_descript *nd)
{
	/* Stub: no v4 pseudo-root export handling; always returns 0. */
	return (0);
}

/*
 * Set the credentials to refer to root.
 * If only the various BSDen could agree on whether cr_gid is a separate
 * field or cr_groups[0]...
 */
APPLESTATIC void
newnfs_setroot(struct ucred *cred)
{

	/* Root: uid 0 with a single group entry, gid 0. */
	cred->cr_ngroups = 1;
	cred->cr_groups[0] = 0;
	cred->cr_uid = 0;
}

/*
 * Get the client credential. Used for Renew and recovery.
 */
APPLESTATIC struct ucred *
newnfs_getcred(void)
{
	struct ucred *newcr;

	/*
	 * Allocate a zeroed, single-reference credential and make it
	 * refer to root.  It is a plain malloc'd structure, not a
	 * kauth-managed credential.
	 */
	MALLOC(newcr, struct ucred *, sizeof (struct ucred), M_TEMP, M_WAITOK);
	bzero(newcr, sizeof (struct ucred));
	newcr->cr_ref = 1;
	newnfs_setroot(newcr);
	return (newcr);
}

APPLESTATIC struct ucred *
newnfs_creddup(struct ucred *cred)
{
	struct ucred *dup;
	int g;

	/*
	 * Make a private, single-reference copy of the credential's
	 * uid and group list.  Caller owns (and must FREE) the result.
	 */
	MALLOC(dup, struct ucred *, sizeof (struct ucred), M_TEMP, M_WAITOK);
	bzero(dup, sizeof (struct ucred));
	dup->cr_ref = 1;
	dup->cr_uid = cred->cr_uid;
	dup->cr_ngroups = cred->cr_ngroups;
	for (g = 0; g < cred->cr_ngroups; g++)
		dup->cr_groups[g] = cred->cr_groups[g];
	return (dup);
}

