from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
from pyspark.sql import Row

from pyspark.ml.clustering import KMeans,KMeansModel
# Single-JVM local session for this small clustering demo.
spark = (
    SparkSession.builder
    .master('local')
    .appName('kmeans')
    .getOrCreate()
)


def load_data(x):
    """Convert one parsed Iris CSV record into a dict of Row fields.

    Args:
        x: sequence of 5 strings — four numeric measurements followed
           by the species label.

    Returns:
        dict with 'features' (DenseVector of the four measurements) and
        'label' (species string), suitable for Row(**...).
    """
    measurements = [float(value) for value in x[:4]]
    return {
        'features': Vectors.dense(*measurements),
        'label': str(x[4]),
    }
# Parse the comma-separated Iris file into a DataFrame of (features, label).
raw_lines = spark.sparkContext.textFile('./Iris.txt')
row_rdd = raw_lines.map(lambda line: line.split(',')).map(lambda fields: Row(**load_data(fields)))
data = row_rdd.toDF()

# Fit a 3-cluster KMeans model on the feature vectors.
kmeansmodel = KMeans(k=3, featuresCol='features', predictionCol='prediction').fit(data)

# Report the assigned cluster for every sample. Access columns by NAME:
# transform() appends 'prediction' after the input columns, so positional
# item[1] is the 'label' string, not the predicted cluster (original bug).
results = kmeansmodel.transform(data).collect()
for item in results:
    print(str(item['features']) + ' is predicted as cluster ' + str(item['prediction']))

# Print the learned cluster centroids.
centers = kmeansmodel.clusterCenters()
for center in centers:
    print(center)

# Within-set sum of squared distances. computeCost() was deprecated in
# Spark 2.4 and removed in 3.0; prefer the training summary when available.
try:
    cost = kmeansmodel.summary.trainingCost  # Spark >= 2.4
except AttributeError:
    cost = kmeansmodel.computeCost(data)  # fallback for older Spark
print(cost)

# Release local Spark resources now that the demo is done.
spark.stop()