\name{nnls}
\alias{nnls}
\alias{pnnls}
\alias{pnnqp}
%- Also NEED an '\alias' for EACH other topic documented here.

\title{Least Squares and Quadratic Programming under Nonnegativity Constraints}
\description{
  
   \code{nnls} solves the least squares problem under nonnegativity (NN)
  constraints. It is an R interface to the NNLS function that is
  described in Lawson and Hanson (1974, 1995). Its Fortran
  implementation is public domain and available at
  \url{https://www.netlib.org/lawson-hanson}.

  \code{pnnls} solves the least squares problem when only part of the
  coefficients are subject to nonnegativity constraints. It also allows
  the NN-restricted coefficients to be further restricted to have a
  fixed positive sum.

  \code{pnnqp} solves the quadratic programming problem when solution
  values are partly subject to nonnegativity constraints. It also allows
  the NN-restricted coefficients to be further restricted to have a
  fixed positive sum.

  These functions are particularly useful for finding zeros exactly. 

}

\usage{
nnls(a, b) 
pnnls(a, b, k=0, sum=NULL) 
pnnqp(q, p, k=0, sum=NULL, tol=1e-20)
}

\arguments{
  
  \item{a}{Design matrix.}
  
  \item{b}{Response vector.} 
  
  \item{k}{Integer, meaning that the first \code{k} coefficients are not
  NN-restricted.} 
  
  \item{sum}{= NULL, if NN-restricted coefficients are not further
    restricted to have a fixed sum; 

    = a positive value, if NN-restricted coefficients are further
    restricted to have a fixed positive sum.}
  
  \item{q}{Positive semidefinite matrix of numeric values for the
    quadratic term of a quadratic programming problem.}
  
  \item{p}{Vector of numeric values for the linear term of a quadratic
    programming problem.}

  \item{tol}{Tolerance used for calculating pseudo-rank of \code{q}.}
}

\details{
  Given matrix \code{a} and vector \code{b}, \code{nnls} solves the
  nonnegative least squares (NNLS) problem:
  
  \deqn{\mathrm{minimize \ \ } || a x - b ||,}{minimize  || a x - b ||,} \deqn{\mathrm{\ \ \ subject\ to\ \ } x \ge 0.}{ subject to  x >= 0.}
  
  Function \code{pnnls} also solves the above nonnegative least
  squares problem when \code{k=0}, but it may also leave the first
  \code{k} coefficients unrestricted. The output value of \code{k} can
  be different from the input, if \code{a} has linearly dependent
  columns. If \code{sum} is a positive value, \code{pnnls} solves the
  problem by further restricting that the NN-restricted coefficients
  must sum to the given value.

  Function \code{pnnqp} solves the quadratic programming problem

  \deqn{\mathrm{minimize\ \ } \frac12 x^T q x + p^T x,}{minimize 0.5 x^T q x + p^T x,}

  when some or all of the coefficients are restricted to be
  nonnegative. The quadratic programming problem is solved by
  transforming the problem into a least squares one under the same
  constraints, which is then solved by function \code{pnnls}. Arguments
  \code{k} and \code{sum} have the same meanings as for \code{pnnls}.

  Functions \code{nnls}, \code{pnnls} and \code{pnnqp} are able to
  return any zero-valued solution component as exactly 0. This differs
  from functions \code{lsei} and \code{qp}, which, owing to numerical
  errors, may return very small nonzero values in place of exact zeros.

}

\value{
  
  \item{x}{Solution}
  
  \item{r}{The upper-triangular matrix \code{Q*a}, pivoted by variables
    in the order of \code{index}, when \code{sum=NULL}. If \code{sum
    > 0}, \code{r} is for the transformed \code{a}.}
  
  \item{b}{The vector \code{Q*b}, pivoted by variables in the order of
    \code{index}, when \code{sum=NULL}. If \code{sum > 0}, \code{b} is
    for the transformed \code{b}.}
  
  \item{index}{Indices of the columns of \code{r}; those unrestricted
    and in the positive set are first given, and then those in the zero
    set.}
  
  \item{rnorm}{Euclidean norm of the residual vector.}
  
  \item{mode}{= 1, successful computation;

    = 2, bad dimensions of the problem;

    = 3, iteration count exceeded (the number of iterations has
    exceeded 3 times the number of variables).}

  \item{k}{Number of the first few coefficients that are truly not
  NN-restricted.} 
  
}

\author{ Yong Wang <yongwang@auckland.ac.nz>}

\references{

  Lawson and Hanson (1974, 1995). Solving Least Squares
  Problems. Englewood Cliffs, N.J., Prentice-Hall.

  Dax (1990). The smallest point of a polytope. Journal of Optimization
  Theory and Applications, 64, pp. 429-432.
  
  Wang (2010). Fisher scoring: An interpolation family and its Monte
  Carlo implementations. Computational Statistics and Data Analysis, 54,
  pp. 1744-1755.
  
}

\seealso{ \code{\link{lsei}}, \code{\link{hfti}}. }

\examples{
a = matrix(rnorm(40), nrow=10)
b = drop(a \%*\% c(0,1,-1,1)) + rnorm(10)
nnls(a, b)$x                     # constraint x >= 0
pnnls(a, b, k=0)$x               # same as nnls(a, b)
pnnls(a, b, k=2)$x               # first two coeffs are not NN-constrained
pnnls(a, b, k=2, sum=1)$x        # NN-constrained coeffs must sum to 1
pnnls(a, b, k=2, sum=2)$x        # NN-constrained coeffs must sum to 2
q = crossprod(a)
p = -drop(crossprod(b, a))
pnnqp(q, p, k=2, sum=2)$x        # same solution

pnnls(a, b, sum=1)$x             # zeros found exactly
pnnqp(q, p, sum=1)$x             # zeros found exactly
lsei(a, b, rep(1,4), 1, lower=0) # zeros not so exact
}
\keyword{ array }   % at least one, from doc/KEYWORDS
\keyword{ algebra }   % at least one, from doc/KEYWORDS
