from pyspark import SparkContext
import os
from matplotlib import pyplot as plt

from pyspark.mllib.clustering import KMeans

# Force PySpark worker processes to use python3 (avoids driver/worker
# Python version mismatch errors on systems where `python` is Python 2).
os.environ['PYSPARK_PYTHON'] = "python3"


# 61 hand-picked 2-D sample points used as the clustering dataset.
# Visually they form roughly three groups (upper-left, right, bottom),
# which is why k=3 is used when training KMeans below.
POINTS = [(1.83, 3.43), (1.17, 3.37), (1.23, 2.2), (2.67, 2.47), (2.67, 3.73), (2.43, 3.37), (2.23, 2.7), (1.97, 2.3),
          (1.53, 2.73), (1.97, 2.9), (1.97, 4.1), (1.33, 4.1), (1.63, 3.93), (1.1, 3.8), (1.43, 3.77), (1.6, 3.17),
          (1.2, 3.13), (0.83, 3.13), (0.9, 2.33), (1.0, 2.83), (4.27, 2.33), (3.93, 1.8), (4.03, 1.07), (4.73, 1.1),
          (4.8, 2.33), (4.27, 2.63), (3.6, 2.7), (3.67, 3.23), (3.1, 2.9), (2.83, 3.1), (3.6, 1.57), (3.47, 1.93),
          (4.07, 2.2), (3.83, 2.43), (4.67, 1.5), (4.3, 1.8), (4.63, 2.1), (4.73, 2.9), (4.63, 3.27), (1.4, 1.37),
          (1.77, 0.6), (2.63, 0.77), (2.67, 1.03), (2.33, 1.4), (1.77, 1.4), (1.77, 1.0), (2.13, 1.03), (1.2, 1.07),
          (1.2, 0.5), (1.67, 0.5), (1.67, 0.5), (2.4, 0.47), (3.13, 0.5), (3.07, 1.4), (2.47, 1.4), (2.33, 0.77),
          (1.6, 0.87), (1.53, 1.07), (1.53, 0.67), (2.23, 0.67), (2.3, 1.23)]

# Train a 3-cluster KMeans model on the sample points and print the
# within-set sum of squared errors (WSSSE) as a fit-quality metric.
sc = SparkContext.getOrCreate()
rdd = sc.parallelize(POINTS)

# Fix the seed so repeated runs converge to the same centroids and the
# printed cost is reproducible (KMeans.train is otherwise randomized).
model = KMeans.train(rdd, 3, seed=42)

# Optional visualisation of the raw points and the learned centroids.
# Opt-in via environment variable so the script runs headless by default.
if os.environ.get("PLOT_CLUSTERS"):
    centers = model.clusterCenters
    plt.scatter([x for x, _ in POINTS], [y for _, y in POINTS])
    plt.scatter([x for x, _ in centers], [y for _, y in centers])
    plt.show()

print(model.computeCost(rdd))

# Release Spark driver/executor resources explicitly.
sc.stop()