\name{conreg}
\alias{conreg}
\title{Least squares and logistic regression using SCAD, MCP and SICA penalties}
\description{This package implements a coordinate majorization descent algorithm for fitting regularization paths of Least Squares and logistic regression using SCAD, MCP and SICA penalties.}
\usage{
conreg(x, y, 
family = c("ls", "logit"), 
penalty = c("scad", "mcp", "sica"), a, 
nlambda = 100, 
lambda.factor = ifelse(nobs < nvars, 0.01, 1e-04), 
lambda = NULL, lambda2 = 0,
exclude, dfmax = nvars + 1, 
pmax = min(dfmax * 1.2, nvars), 
standardize = TRUE, eps = 1e-06, maxit = 1e+05)
}
\arguments{
		\item{x}{matrix of predictors, of dimension \eqn{N \times p}{N*p}; each row is an observation vector.}

		\item{y}{response variable. This argument should be quantitative for Least Squares and a two-level factor for logistic regression.}

		\item{family}{currently supports two families; valid options are:
		\itemize{
		\item \code{"ls"} Least Squares
		\item \code{"logit"} Logistic regression
		}}

		\item{penalty}{a character string specifying the name of the penalty to use. Currently available options are "scad", "mcp" and "sica". 
		\itemize{
		\item \code{"scad"} SCAD
		\deqn{P_{\lambda,a}^{'}(|\theta|)=\lambda I(|\theta|\leq\lambda)+\frac{(a\lambda-|\theta|)_{+}}{a-1}I(|\theta|>\lambda)}{dP(|theta|)/d|theta| = lambda*I(|theta|<=lambda) + max(a*lambda-|theta|, 0)*I(|theta|>lambda)/(a-1)}
		\item \code{"mcp"} MCP
		\deqn{P_{\lambda,a}^{'}(|\theta|)=\frac{(a\lambda-|\theta|)_{+}}{a}}{dP(|theta|)/d|theta| = max(a*lambda-|theta|, 0)/a}
		\item \code{"sica"} SICA
		\deqn{P_{\lambda,a}(|\theta|)=\lambda\frac{(a+1)|\theta|}{a+|\theta|}}{P(|theta|)=lambda*(a+1)*|theta|/(a+|theta|)}
		}}
		\item{a}{the parameter for non-convex penalties. The default depends on the
		\code{penalty}. If it is \code{"scad"}, the default is 3.7; if \code{"mcp"}, the default is 2.0; if \code{"sica"}, the default is 0.1. }
		
		\item{nlambda}{the number of \code{lambda} values - default is 100.}

		\item{lambda.factor}{The factor for getting the minimal lambda in \code{lambda} sequence, where \code{min(lambda) = lambda.factor * max(lambda)}.  \code{max(lambda)} is the smallest value of \code{lambda} for which all coefficients are zero. The default depends on the relationship between \eqn{N} (the number of rows in the matrix of predictors) and \eqn{p} (the number of predictors). If \eqn{N > p}, the default is \code{0.0001},
		close to zero.  If \eqn{N<p}, the default is \code{0.01}.
		A very small value of \code{lambda.factor} will lead to a saturated fit. It has no effect if a user-defined \code{lambda} sequence is supplied.} 
		\item{lambda}{a user supplied \code{lambda} sequence. Typically, by leaving this option unspecified users can have 
		the program compute its own \code{lambda} sequence based on
		\code{nlambda} and \code{lambda.factor}. Supplying a value of
		\code{lambda} overrides this. It is better to supply
		a decreasing sequence of \code{lambda} values than a single (small) value; otherwise the program will automatically sort the user-defined \code{lambda} sequence in decreasing order.}

		\item{lambda2}{regularization parameter \eqn{\lambda_2}{lambda2} for the quadratic penalty of the 
		coefficients. Default is \code{0} (no quadratic penalty). See details.}

		\item{exclude}{indices of variables to be excluded from the
		model. Default is none.}

		\item{dfmax}{limit the maximum number of variables in the
		model. Useful for very large \eqn{p}, if a partial path is desired. Default is \eqn{p+1}.}

		\item{pmax}{limit the maximum number of variables ever to be nonzero. For example, once \eqn{\beta} enters the model, no matter how many times it exits or re-enters the model through the path, it will be counted only once. Default is \code{min(dfmax*1.2,p)}.}

		\item{standardize}{logical flag for variable standardization, prior to
		fitting the model sequence. If \code{TRUE}, x matrix is normalized such that sum squares of each column \eqn{\sum^N_{i=1}x_{ij}^2=\frac{1}{N}}{<Xj,Xj>=1/N}. Note that x is always centered (i.e. \eqn{\sum^N_{i=1}x_{ij}=0}{sum(Xj)=0}) no matter \code{standardize} is \code{TRUE} or \code{FALSE}. The coefficients are always returned on
		the original scale. Default is \code{TRUE}.}

		\item{eps}{convergence threshold for coordinate majorization descent. Each inner
		coordinate majorization descent loop continues until the maximum squared change in any
		coefficient (i.e. \eqn{\max_j(\beta_j^{new}-\beta_j^{old})^2}{max(j)(beta_new[j]-beta_old[j])^2}) is less than \code{eps}. Default value is \code{1e-6}.}

		\item{maxit}{maximum number of outer-loop iterations allowed at fixed lambda value. Default is 1e+05. If models do not converge, consider increasing \code{maxit}.}
}

\details{
The algorithm estimates a linear functional \eqn{\beta_0+\boldsymbol{x}^{\mathrm{T}}\boldsymbol{\beta}}{beta_0+t(X)Beta} based on observed data, through penalized empirical loss minimization
\deqn{(\hat{\beta}_0,\hat{\boldsymbol{\beta}})=\arg\min(\mathrm{Loss}(\mathrm{Data},\beta_0,\boldsymbol{\beta})+\mathrm{Penalty}_{\lambda}(\boldsymbol{\beta}))}{argmin[Loss(Data,beta0,Beta)+P_lambda(Beta)]}
It can compute \eqn{(\hat{\beta}_0,\hat{\boldsymbol{\beta}})}{(beta0,Beta)} estimates at a fine grid of values of \eqn{\lambda}{lambda}s in order to pick up a data-driven optimal \eqn{\lambda}{lambda} for fitting a 'best' final model.  

So far, conreg covers the following losses and penalties.
\itemize{
\item Loss: Least squares, Logistic regression. 
\item Penalty: 
\itemize{
\item SCAD
\item MCP
\item SICA
\item Non-convex Net
}
}

Note that the loss function for Least Squares is 

\deqn{\frac{1}{2N}*\mathrm{RSS}}{0.5 * RSS/N} and for logistic regression it is 

\deqn{-\frac{1}{N}*\log lik}{-loglik/N}

where \eqn{N} is the number of rows in the matrix of predictors.

The penalty is a combination of a quadratic term and a user-specified penalty

\deqn{\frac{1}{2}*\lambda_2 * ||\boldsymbol{\beta}||_2^2 +  P_\lambda(\boldsymbol{\beta})}{0.5 * lambda2 * Beta^2 + P_lambda(Beta)}

where \eqn{\lambda}{lambda} is \code{lambda}, \eqn{\lambda_2}{lambda2} is \code{lambda2}. Most importantly, users can specify the penalty by choosing different \eqn{\lambda_2}{lambda2} and penalty type. 

For example, for Non-convex SCAD net, set \code{lambda2} positive, and \code{penalty="scad"}.
\deqn{\frac{1}{2}*\lambda_2 * ||\boldsymbol{\beta}||_2^2 +  P_{\lambda}^{\mathrm{SCAD}}(\boldsymbol{\beta})}{0.5 * lambda2 * Beta^2 + P(SCAD)_lambda(Beta)}

For computing speed reasons, if models are not converging or running slow, consider increasing \code{eps}, decreasing
\code{nlambda}, or increasing \code{lambda.factor} before increasing
\code{maxit}.

% For \code{"scad"} and \code{"mcp"}, for each coordinate within each iteration, it computes a closed-form solution of the quadratic majorization of the loss function and penalty. For \code{"sica"} it uses the first order approximation to replace the penalty.
}


\value{
An object with S3 class \code{\link{conreg}}.
		\item{call}{the call that produced this object}
		\item{b0}{intercept sequence of length \code{length(lambda)}}
		\item{beta}{a \code{p*length(lambda)} matrix of coefficients, stored as a sparse matrix (\code{dgCMatrix} class, the standard class for sparse numeric matrices in the \code{Matrix} package). To convert it into a normal type matrix, use \code{as.matrix()}.}
		\item{lambda}{the actual sequence of \code{lambda} values used}
		\item{df}{the number of nonzero coefficients for each value of
		\code{lambda}.}
		\item{dim}{dimensions of the coefficient matrix}
		\item{npasses}{total number of iterations (the most inner loop) summed over all lambda values}
		\item{jerr}{error flag, for warnings and errors, 0 if no error.}
}

\author{Yi Yang and Hui Zou\cr
Maintainer: Yi Yang  <yiyang@umn.edu>}
\references{
Yang, Y. and Zou, H. (2012), "An Efficient Algorithm for Computing The HHSVM and Its Generalizations," \emph{Journal of Computational and Graphical Statistics}. Accepted\cr
BugReport: \url{http://code.google.com/p/conreg/}\cr
}


\seealso{\code{plot.conreg}}
\examples{

# Simulated data: 100 observations, 10 predictors.
# (RNG calls happen in the same order as before: rnorm for x,
# rnorm for y, then sample for the binary response g.)
x <- matrix(rnorm(100 * 10), 100, 10)
y <- rnorm(100)
g <- sample(0:1, 100, replace = TRUE)

############################################
# Least Squares + SCAD
############################################

fit.ls <- conreg(x = x, y = y, family = "ls", penalty = "scad")
plot(fit.ls)

############################################
# Logistic regression + MCP
############################################

fit.logit <- conreg(x = x, y = g, family = "logit", penalty = "mcp")
plot(fit.logit)

}
\keyword{models}
\keyword{regression}
