/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 * 
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 * 
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 * 
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * 
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * This file includes modifications of code found in the Apple XNU Kernel
 * and has been modified for use in this Kext by Rick Macklem, May 2006.
 */

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */
#ifndef APPLEKEXT
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/ubc_internal.h>
#include <sys/fcntl.h>
#include <sys/uio_internal.h>
#include <sys/domain.h>
#include <sys/user.h>
#include <libkern/OSAtomic.h>

#include <sys/vm.h>
#include <sys/vmparam.h>

#include <sys/time.h>
#include <kern/clock.h>

#include <newnfs/nfs/nfsport.h>
#include <newnfs/nfsclient/nfsnode.h>
#include <newnfs/nfsclient/nfsmount.h>
#include <newnfs/nfsclient/nfsclient_var.h>

#include <miscfs/specfs/specdev.h>

#include <netinet/in.h>
#if ISO
#include <netiso/iso.h>
#endif

#include <sys/kdebug.h>

extern struct nfsstats newnfsstats;
extern int ncl_mbuf_mlen, ncl_mbuf_mhlen,
    ncl_mbuf_minclsize, ncl_mbuf_mclbytes;
extern proc_t ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int ncl_ioddelwri;
extern lck_mtx_t *ncl_buf_mutex;
extern lck_spin_t *ncl_nfsnode_slock;
extern int nclbufdelwricnt;
extern struct nfsbuffreehead nclbufdelwri;
int ncl_mount_type, ncl_resv_mounts = 0;
lck_mtx_t *ncl_iod_mutex;
int ncl_defect = 0;

#define FSDBG(A, B, C, D, E) \
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_NONE, \
		(int)(B), (int)(C), (int)(D), (int)(E), 0)
#define FSDBG_TOP(A, B, C, D, E) \
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_START, \
		(int)(B), (int)(C), (int)(D), (int)(E), 0)
#define FSDBG_BOT(A, B, C, D, E) \
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_END, \
		(int)(B), (int)(C), (int)(D), (int)(E), 0)
LIST_HEAD(nfsnodehashhead, nfsnode);
#endif	/* !APPLEKEXT */

#if !defined(NFSCLIENT) && !defined(APPLEKEXT)
SYSCTL_DECL(_vfs_generic);
SYSCTL_NODE(_vfs_generic, OID_AUTO, nfs, CTLFLAG_RW, 0, "nfs hinge");
#endif

static lck_grp_t *ncl_iod_lck_grp;
static lck_grp_attr_t *ncl_iod_lck_grp_attr;
static lck_attr_t *ncl_iod_lck_attr;

/* XXX CSM 11/25/97 Upgrade sysctl.h someday */
#ifdef notyet
SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &ncl_defect, 0, "");
#endif

/*
 * Called once, at VFS registration time, to initialize the NFS client's
 * global data structures: the nfsiod daemon slots and their mutex, the
 * nfsbuf and nfsnode tables, and the cached mbuf constants.
 * Always returns 0.
 */
APPLESTATIC int
ncl_init(struct vfsconf *vfsp)
{
	int i;

	/* Remember our VFS type number for later mount-type checks. */
	ncl_mount_type = vfsp->vfc_typenum;
	/* Ensure async daemons disabled */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		ncl_iodwant[i] = NULL;
		ncl_iodmount[i] = NULL;
	}
	/* init nfsiod mutex (group/attr must be set up before the mutex) */
	ncl_iod_lck_grp_attr = lck_grp_attr_alloc_init();
	ncl_iod_lck_grp = lck_grp_alloc_init("ncl_iod", ncl_iod_lck_grp_attr);
	ncl_iod_lck_attr = lck_attr_alloc_init();
	ncl_iod_mutex = lck_mtx_alloc_init(ncl_iod_lck_grp, ncl_iod_lck_attr);

	ncl_nbinit();			/* Init the nfsbuf table */
	ncl_nhinit();			/* Init the nfsnode table */

	/* make sure mbuf constants are set up (zero mlen => not yet cached) */
	if (!ncl_mbuf_mlen)
		ncl_mbuf_init();

#ifdef notyet
	/* init nfsd mutex */
	nfsd_lck_grp_attr = lck_grp_attr_alloc_init();
#ifdef notdef
	lck_grp_attr_setstat(nfsd_lck_grp_attr);
#endif
	nfsd_lck_grp = lck_grp_alloc_init("nfsd", nfsd_lck_grp_attr);
	nfsd_lck_attr = lck_attr_alloc_init();
	nfsd_mutex = lck_mtx_alloc_init(nfsd_lck_grp, nfsd_lck_attr);

	/* init slp rwlock */
	nfs_slp_lock_attr    = lck_attr_alloc_init();
	nfs_slp_group_attr   = lck_grp_attr_alloc_init();
	nfs_slp_rwlock_group = lck_grp_alloc_init("nfs-slp-rwlock", nfs_slp_group_attr);
	nfs_slp_mutex_group  = lck_grp_alloc_init("nfs-slp-mutex", nfs_slp_group_attr);

	/* init export data structures */
	nfsexphashtbl = hashinit(8, M_TEMP, &nfsexphash);
	LIST_INIT(&nfs_exports);
	nfs_export_lock_attr    = lck_attr_alloc_init();
	nfs_export_group_attr   = lck_grp_attr_alloc_init();
	nfs_export_rwlock_group = lck_grp_alloc_init("nfs-export-rwlock", nfs_export_group_attr);
	lck_rw_init(&nfs_export_rwlock, nfs_export_rwlock_group, nfs_export_lock_attr);

	lck_mtx_lock(nfsd_mutex);
	nfsrv_init(0);			/* Init server data structures */
	nfsrv_initcache();		/* Init the server request cache */
	lck_mtx_unlock(nfsd_mutex);
#endif

	vfsp->vfc_refcount++; /* make us non-unloadable */
	return (0);
}

/*
 * initialize NFS's cache of mbuf constants
 */
APPLESTATIC void
ncl_mbuf_init(void)
{
	struct mbuf_stat ms;

	mbuf_stats(&ms);
	ncl_mbuf_mlen = ms.mlen;
	ncl_mbuf_mhlen = ms.mhlen;
	ncl_mbuf_minclsize = ms.minclsize;
	ncl_mbuf_mclbytes = ms.mclbytes;
}

/*
 * Calculate the attribute cache timeout for a vnode, based on how
 * recently the file was modified.  Recently-changed files get the
 * mount's minimum timeout; older files scale up to the maximum.
 * Returns 0 if the mount has gone away.
 */
APPLESTATIC int
ncl_attrcachetimeout(vnode_t vp)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;
	struct timeval now;
	int acmin, acmax, timeo;

	nmp = VFSTONFS(vnode_mount(vp));
	if (nmp == NULL)
		return (0);

	/* Directories and regular files use separate min/max bounds. */
	if (vnode_isdir(vp)) {
		acmin = nmp->nm_acdirmin;
		acmax = nmp->nm_acdirmax;
	} else {
		acmin = nmp->nm_acregmin;
		acmax = nmp->nm_acregmax;
	}

	/* Locally modified files always use the minimum timeout. */
	if (np->n_flag & NMODIFIED)
		return (acmin);

	/*
	 * Scale the timeout with the file's age: one tenth of the time
	 * since the last modification, clamped to [acmin, acmax].  If
	 * the client and server clocks are badly out of sync, the value
	 * simply ends up clamped to one of the bounds.
	 */
	microtime(&now);
	timeo = (now.tv_sec - np->n_mtime.tv_sec) / 10;
	if (timeo < acmin)
		timeo = acmin;
	else if (timeo > acmax)
		timeo = acmax;

	return (timeo);
}

/*
 * Check the attribute cache time stamp.
 * If the cached attributes are still valid, copy them to *nvaper and
 * return 0; otherwise return ENOENT so the caller fetches fresh
 * attributes from the server.
 *
 * (Definition modernized from the obsolescent K&R parameter style to an
 * ANSI prototype; behavior is unchanged.)
 */
APPLESTATIC int
ncl_getattrcache(vnode_t vp, struct nfs_vattr *nvaper)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfs_vattr *nvap;
	struct timeval nowup;
	int32_t timeo;
	u_int64_t orig_size;
	int cnt;

	/* No cached attributes at all. */
	if (!NATTRVALID(np)) {
		FSDBG(528, vp, 0, 0, 0);
		OSAddAtomic(1, (SInt32*)&newnfsstats.attrcache_misses);
		return (ENOENT);
	}

	timeo = ncl_attrcachetimeout(vp);

	/* Cached attributes older than the computed timeout are stale. */
	microuptime(&nowup);
	if ((nowup.tv_sec - np->n_attrstamp) >= timeo) {
		FSDBG(528, vp, 0, 0, 1);
		OSAddAtomic(1, (SInt32*)&newnfsstats.attrcache_misses);
		return (ENOENT);
	}
	nvap = &np->n_vattr.na_vattr;
	/* For direct-I/O mounts, never serve regular-file attrs from cache. */
	if ((VFSTONFS(vnode_mount(vp))->nm_flag & NFSMNT_DIRECTIO) &&
	    nvap->nva_type == VREG) {
		OSAddAtomic(1, (SInt32*)&newnfsstats.attrcache_misses);
		return (ENOENT);
	}
	FSDBG(528, vp, 0, 0, 2);
	OSAddAtomic(1, (SInt32*)&newnfsstats.attrcache_hits);

	if (nvap->nva_size != np->n_size) {
		FSDBG(528, vp, nvap->nva_size, np->n_size,
		      (nvap->nva_type == VREG) |
		      (np->n_flag & NMODIFIED ? 6 : 4));
		if (nvap->nva_type == VREG) {
			/*
			 * Reconcile the cached server size with our notion
			 * of the file size.  With dirty data (NMODIFIED),
			 * keep the larger of the two; otherwise adopt the
			 * server's size.  ubc_setsize() can block, so retry
			 * a few times if n_size changes underneath us.
			 */
			cnt = 0;
			do {
				lck_spin_lock(ncl_nfsnode_slock);
				if (np->n_flag & NMODIFIED) {
					if (nvap->nva_size < np->n_size)
						nvap->nva_size = np->n_size;
					else
						np->n_size = nvap->nva_size;
				} else
					np->n_size = nvap->nva_size;
				lck_spin_unlock(ncl_nfsnode_slock);
				orig_size = np->n_size;
				ubc_setsize(vp, (off_t)np->n_size); /* XXX */
#ifdef DIAGNOSTIC
				if (np->n_size != orig_size)
					printf("ncl_getattrcache: ns=%qd os=%qd\n",
					    np->n_size, orig_size);
#endif
			} while (np->n_size != orig_size && cnt++ < 5);
		} else
			np->n_size = nvap->nva_size;
	}

	bcopy((caddr_t)nvap, (caddr_t)nvaper, sizeof(struct nfs_vattr));
	/* If times are locally shadowed (NCHG), report those instead. */
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC)
			nvaper->nva_atime = np->n_atim;
		if (np->n_flag & NUPD)
			nvaper->nva_mtime = np->n_mtim;
	}
	return (0);
}

static nfsuint64 nfs_nullcookie = { { 0, 0 } };
/*
 * Find the directory cookie that corresponds to the logical byte
 * offset given.  With 'add' set, missing cookie blocks are allocated
 * along the way; otherwise a miss returns NULL.
 * XXX This function is not SMP safe w.r.t. the nfsnode fields.
 * My code never does directory reads through the buffer cache,
 * but if that is enabled, then this must be fixed, since it
 * will then be called by the iod threads. This is not as easy
 * as it might look, since the MALLOC_ZONEs can block.
 */
nfsuint64 *
ncl_getcookie(struct nfsnode *np, off_t off, int64_t siz, int add)
{
	struct nfsdmap *dmp, *newdmp;
	int slot;

	slot = off / siz;
	if (slot == 0) {
#if DIAGNOSTIC
		if (add)
			panic("nfs getcookie add at 0");
#endif
		/* Offset zero always maps to the well-known null cookie. */
		return (&nfs_nullcookie);
	}
	slot--;
	dmp = np->n_cookies.lh_first;
	if (dmp == NULL) {
		if (!add)
			return (NULL);
		/* First cookie block: allocate and hook onto the list head. */
		MALLOC_ZONE(dmp, struct nfsdmap *, sizeof(struct nfsdmap),
				M_NFSDIROFF, M_WAITOK);
		if (dmp == NULL)
			return (NULL);
		dmp->ndm_eocookie = 0;
		LIST_INSERT_HEAD(&np->n_cookies, dmp, ndm_list);
	}
	/* Advance one cookie block for every NFSNUMCOOKIES entries. */
	while (slot >= NFSNUMCOOKIES) {
		slot -= NFSNUMCOOKIES;
		if (dmp->ndm_list.le_next != NULL) {
			if (!add && dmp->ndm_eocookie < NFSNUMCOOKIES &&
			    slot >= dmp->ndm_eocookie)
				return (NULL);
			dmp = dmp->ndm_list.le_next;
		} else if (!add) {
			return (NULL);
		} else {
			MALLOC_ZONE(newdmp, struct nfsdmap *, sizeof(struct nfsdmap),
					M_NFSDIROFF, M_WAITOK);
			if (newdmp == NULL)
				return (NULL);
			newdmp->ndm_eocookie = 0;
			LIST_INSERT_AFTER(dmp, newdmp, ndm_list);
			dmp = newdmp;
		}
	}
	if (slot >= dmp->ndm_eocookie) {
		if (!add)
			return (NULL);
		/* Extend the in-use range to cover the new slot. */
		dmp->ndm_eocookie = slot + 1;
	}
	return (&dmp->ndm_cookies[slot]);
}

/*
 * Invalidate cached directory information, except for the actual directory
 * blocks (which are invalidated separately).
 * Done mainly to avoid the use of stale offset cookies.
 *
 * (Definition modernized from the obsolescent K&R parameter style to an
 * ANSI prototype; behavior is unchanged.)
 */
APPLESTATIC void
ncl_invaldir(vnode_t vp)
{
	struct nfsnode *np = VTONFS(vp);

#if DIAGNOSTIC
	if (vnode_vtype(vp) != VDIR)
		panic("nfs: invaldir not dir");
#endif
	/* Forget the cached EOF offset and clear its valid flag. */
	np->n_direofoffset = 0;
	OSBitAndAtomic((int32_t)~NDIREOFVALID, (UInt32 *)&np->n_flag);
	/* Zap the cookie verifier and mark the first cookie block empty. */
	np->n_cookieverf.nfsuquad[0] = 0;
	np->n_cookieverf.nfsuquad[1] = 0;
	if (np->n_cookies.lh_first)
		np->n_cookies.lh_first->ndm_eocookie = 0;
}

/*
 * Continuation for Asynchronous I/O daemons for client nfs.
 * Services the mount's async buffer queue (nm_bufq) and, when poked via
 * ncl_ioddelwri, pushes a handful of delayed-write buffers.  Runs until
 * the mount goes away or an error occurs.  The proc_t argument is unused
 * here.
 */
static void
newnfs_iod_continue(struct nfsmount *nmp, struct nfsasynciothread *atp,
    proc_t p)
{
	struct nfsbuf *bp;
	int i, error = 0;

	/*
	 * Just loop around doin our stuff until SIGKILL
	 *  - actually we don't loop with continuations...
	 */
	lck_mtx_lock(ncl_iod_mutex);
	for (;;) {
		/*
		 * Idle: nothing queued and no delayed-write work pending.
		 * msleep() with PDROP releases ncl_iod_mutex before
		 * sleeping, hence the explicit re-lock after it returns.
		 */
		while ((nmp->nm_state & NFSSTA_MOUNTED) &&
		   TAILQ_EMPTY(&nmp->nm_bufq) &&
		   error == 0 && ncl_ioddelwri == 0) {
			atp->nfsio_ready = 1;
			(void) msleep((caddr_t)atp, ncl_iod_mutex,
			    PDROP | PWAIT , "nfsidl", 0);
			lck_mtx_lock(ncl_iod_mutex);
		}
		/* Mount gone or error: release the mutex and exit. */
		if (error || (nmp->nm_state & NFSSTA_MOUNTED) == 0) {
			lck_mtx_unlock(ncl_iod_mutex);
			return;
		}
		/* Drain the async I/O queue, one buffer at a time. */
		while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
		    /* Take one off the front of the list */
		    TAILQ_REMOVE(&nmp->nm_bufq, bp, nb_free);
		    bp->nb_free.tqe_next = NFSNOLIST;
		    nmp->nm_bufqlen--;
		    /* Wake any thread throttled on a full buffer queue. */
		    if (nmp->nm_bufqwant && nmp->nm_bufqlen < 2 * nmp->nm_numasync) {
			nmp->nm_bufqwant = FALSE;
			lck_mtx_unlock(ncl_iod_mutex);
			wakeup(&nmp->nm_bufq);
		    } else {
			lck_mtx_unlock(ncl_iod_mutex);
		    }

		    /* Do the I/O without holding the iod mutex. */
		    SET(bp->nb_flags, NB_IOD);
		    if (ISSET(bp->nb_flags, NB_READ))
			ncl_doio(bp, bp->nb_rcred, NULL);
		    else
			ncl_doio(bp, bp->nb_wcred, NULL);

		    lck_mtx_lock(ncl_iod_mutex);
		}

		/*
		 * Delayed-write pass: push up to 8 buffers from the global
		 * delayed-write list.  Buffers still marked NB_NEEDCOMMIT
		 * after verifier re-check are requeued and handled via
		 * ncl_flushcommits() instead of a plain write.
		 */
		if (ncl_ioddelwri) {
			ncl_ioddelwri = 0;
			lck_mtx_unlock(ncl_iod_mutex);
			i = 0;
			lck_mtx_lock(ncl_buf_mutex);
			while (i < 8 && (bp = TAILQ_FIRST(&nclbufdelwri)) != NULL) {
				struct nfsnode *np = VTONFS(bp->nb_vp);
				ncl_buf_remfree(bp);
				ncl_buf_refget(bp);
				/* Spin until the buffer is acquired (EAGAIN = retry). */
				while ((error = ncl_buf_acquire(bp, 0, 0, 0)) == EAGAIN);
				ncl_buf_refrele(bp);
				if (error)
					break;
				if (!bp->nb_vp) {
					/* buffer is no longer valid */
					ncl_buf_drop(bp);
					continue;
				}
				if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
					ncl_buf_check_write_verifier(np, bp);
				if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
					/* put buffer at end of delwri list */
					TAILQ_INSERT_TAIL(&nclbufdelwri, bp, nb_free);
					nclbufdelwricnt++;
					ncl_buf_drop(bp);
					lck_mtx_unlock(ncl_buf_mutex);
					ncl_flushcommits(np->n_vnode, NULL, 1);
				} else {
					SET(bp->nb_flags, (NB_ASYNC | NB_IOD));
					lck_mtx_unlock(ncl_buf_mutex);
					ncl_buf_write(bp);
				}
				i++;
				lck_mtx_lock(ncl_buf_mutex);
			}
			lck_mtx_unlock(ncl_buf_mutex);
			lck_mtx_lock(ncl_iod_mutex);
		}

	}
}

