from pyspark.ml.clustering import KMeans
from pyspark.sql import SparkSession
# Start (or reuse) a local Spark session.
spark = SparkSession \
        .builder \
        .appName("KMeansExample") \
        .getOrCreate()

# Load the sample data in libsvm format.
# Raw string (r"...") keeps backslashes in the Windows path literal; in a
# normal string, "\D"/"\S"/"\s"/"\m" are invalid escape sequences that emit
# SyntaxWarning on modern Python and are scheduled to become errors.
dataset = spark.read.format("libsvm").load(
    r"D:\Develop\Spark\spark-2.4.0-bin-hadoop2.7\data\mllib\sample_kmeans_data.txt")

# Train a k-means model with k=2 clusters (seed=None -> random init each run).
kmeans = KMeans(featuresCol="features", k=2, maxIter=20, seed=None)
model = kmeans.fit(dataset)

# Evaluate clustering by computing Within Set Sum of Squared Errors.
# NOTE(review): KMeansModel.computeCost is deprecated in Spark 2.4 and removed
# in 3.0 — migrate to pyspark.ml.evaluation.ClusteringEvaluator when upgrading.
wssse = model.computeCost(dataset)
print("Within Set Sum of Squared Errors = " + str(wssse))

# Show the learned cluster centers.
centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
    print(center)

# Release Spark resources (executors, UI port) instead of leaking the session.
spark.stop()