% File src/library/stats/man/kmeans.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2025 R Core Team
% Distributed under GPL 2 or later

\name{kmeans}
\alias{kmeans}
\alias{print.kmeans}
\alias{fitted.kmeans}
\title{
K-Means Clustering
}
\description{
  Perform k-means clustering on a data matrix.
}
\usage{
kmeans(x, centers, iter.max = 10, nstart = 1,
       algorithm = c("Hartigan-Wong", "Lloyd", "Forgy",
                     "MacQueen"), trace = FALSE)
\method{fitted}{kmeans}(object, method = c("centers", "classes"), ...)
}
\arguments{
  \item{x}{numeric matrix of data, or an object that can be coerced to
    such a matrix (such as a numeric vector or a data frame with all
    numeric columns).}
  \item{centers}{either the number of clusters, say \eqn{k}, or a set of
    initial (distinct) cluster centres.  If a number, a random set of
    (distinct) rows in \code{x} is chosen as the initial centres.}
  \item{iter.max}{the maximum number of iterations allowed.}
  \item{nstart}{if \code{centers} is a number, how many random sets
    should be chosen?}
  \item{algorithm}{character: may be abbreviated.  Note that
    \code{"Lloyd"} and \code{"Forgy"} are alternative names for one
    algorithm.}
  \item{object}{an \R object of class \code{"kmeans"}, typically the
    result \code{ob} of \code{ob <- kmeans(..)}.}
  \item{method}{character: may be abbreviated. \code{"centers"} causes
    \code{fitted} to return cluster centers (one for each input point) and
    \code{"classes"} causes \code{fitted} to return a vector of class
    assignments.}
  \item{trace}{logical or integer number, currently only used in the
    default method (\code{"Hartigan-Wong"}): if positive (or true),
    tracing information on the progress of the algorithm is
    produced.  Higher values may produce more tracing information.}
  \item{\dots}{not used.}
}
\details{
  The data given by \code{x} are clustered by the \eqn{k}-means method,
  which aims to partition the points into \eqn{k} groups such that the
  sum of squares from points to the assigned cluster centres is minimized.
  At the minimum, all cluster centres are at the mean of their \I{Voronoi}
  sets (the set of data points which are nearest to the cluster centre).

  The algorithm of \bibcitet{R:Hartigan+Wong:1979} is used by
  default.  Note that some authors use \eqn{k}-means to refer to a
  specific algorithm rather than the general method: most commonly the
  algorithm given by \bibcitet{R:MacQueen:1967} but sometimes that given
  by \bibcitet{R:Lloyd:1957, R:Lloyd:1982} and \bibcitet{R:Forgy:1965}.
  The \I{Hartigan}--\I{Wong} algorithm generally does a better job than
  either of those, but trying several random starts (\code{nstart}\eqn{>
  1}) is often recommended.  In rare cases, when some of the points
  (rows of \code{x}) are extremely close, the algorithm may not converge
  in the \dQuote{Quick-Transfer} stage, signalling a warning (and
  returning \code{ifault = 4}).  Slight
  rounding of the data may be advisable in that case.

  For ease of programmatic exploration, \eqn{k = 1} is allowed, notably
  returning the center and \code{withinss}.

  Except for the \I{Lloyd}--\I{Forgy} method, \eqn{k} clusters will always be
  returned if a number is specified.
  If an initial matrix of centres is supplied, it is possible that
  no point will be closest to one or more centres, which is currently
  an error for the \I{Hartigan}--\I{Wong} method.
}
\note{
  The clusters are numbered in the returned object, but they are a
  \emph{set} and no ordering is implied.  (Their apparent ordering may
  differ by platform.)
}
\value{
  \code{kmeans} returns an object of class \code{"kmeans"} which has a
  \code{print} and a \code{fitted} method.  It is a list with at least
  the following components:
  \item{cluster}{
    A vector of integers (from \code{1:k}) indicating the cluster to
    which each point is allocated.
  }
  \item{centers}{A matrix of cluster centres.}
  \item{totss}{The total sum of squares.}
  \item{withinss}{Vector of within-cluster sum of squares,
    one component per cluster.}
  \item{tot.withinss}{Total within-cluster sum of squares,
    i.e.\sspace{}\code{sum(withinss)}.}
  \item{betweenss}{The between-cluster sum of squares,
    i.e.\sspace{}\code{totss - tot.withinss}.}
  \item{size}{The number of points in each cluster.}
  \item{iter}{The number of (outer) iterations.}
  \item{ifault}{integer: indicator of a possible algorithm problem
    -- for experts.}
}
\references{
  \bibshow{*}
}
\examples{
require(graphics)

# a 2-dimensional example: two Gaussian clouds of 50 points each,
# centred at (0,0) and (1,1) with sd 0.3 in each coordinate
x <- rbind(matrix(rnorm(100, sd = 0.3), ncol = 2),
           matrix(rnorm(100, mean = 1, sd = 0.3), ncol = 2))
colnames(x) <- c("x", "y")
(cl <- kmeans(x, 2))
plot(x, col = cl$cluster)
points(cl$centers, col = 1:2, pch = 8, cex = 2)

# sum of squares about the column means (total SS of a matrix)
ss <- function(x) sum(scale(x, scale = FALSE)^2)

## cluster centers "fitted" to each obs.:
fitted.x <- fitted(cl);  head(fitted.x)
resid.x <- x - fitted(cl)

## Equalities : ----------------------------------
cbind(cl[c("betweenss", "tot.withinss", "totss")], # the two columns show the same values
         c(ss(fitted.x), ss(resid.x),    ss(x)))
## the ANOVA-like decomposition: total SS = between SS + within SS
stopifnot(all.equal(cl$ totss,        ss(x)),
	  all.equal(cl$ tot.withinss, ss(resid.x)),
	  ## these three are the same:
	  all.equal(cl$ betweenss,    ss(fitted.x)),
	  all.equal(cl$ betweenss, cl$totss - cl$tot.withinss),
	  ## and hence also
	  all.equal(ss(x), ss(fitted.x) + ss(resid.x))
	  )

kmeans(x,1)$withinss # trivial one-cluster, (its W.SS == ss(x))

## random starts do help here with too many clusters
## (and are often recommended anyway!):
## The ordering of the clusters may be platform-dependent.
\dontdiff{
(cl <- kmeans(x, 5, nstart = 25))
}
plot(x, col = cl$cluster)
points(cl$centers, col = 1:5, pch = 8)
}
\keyword{multivariate}
\keyword{cluster}
