# encoding=utf-8

from pyspark import SparkContext,SparkConf
import math
import os

# Pin the worker Python interpreter so driver and executors agree.
os.environ['PYSPARK_PYTHON'] = "python3"


# Number of nearest neighbours used in the vote, and the query point to classify.
K = 7
P = (1.0, 0.5)

# FIX: the original key was misspelled "spark.driver.memeory", so the
# driver-memory setting was silently ignored by Spark.
conf = SparkConf().set("spark.app.name", "KNN")\
    .set("spark.driver.cores", "1")\
    .set("spark.driver.memory", "512m")\
    .set("spark.executor.memory", "512m")

sc = SparkContext(conf=conf)
sc.setLogLevel("WARN")
# Ship the helper module to every executor so workers can `import utils`.
sc.addPyFile("/home/bigdata/Workspace/P1905/spark_example/utils.py")

rdd = sc.textFile("file:///home/bigdata/Workspace/P1905/spark_example/points-data.txt")

# Broadcast the query point once instead of capturing it in every task closure.
point = sc.broadcast((float(P[0]), float(P[1])))


def map_line(data):
    """Parse one CSV record "x,y,label" into ((x, y), label) with float coords."""
    fields = data.split(",")
    coords = (float(fields[0]), float(fields[1]))
    return (coords, fields[2])


# points = rdd.map(map_line).collect()
# plt.plot([], [], color='r')


# def distance(p1, p2):
#     return math.sqrt(math.pow(p2[0]-p1[0],2) + math.pow(p2[1] - p1[1], 2))

def map_to_distance(record):
    """Key a parsed ((x, y), label) record by its distance to the broadcast query point."""
    # Imported inside the function so the name resolves on executors,
    # where utils.py was shipped via sc.addPyFile.
    from utils import distance
    coords = record[0]
    return (distance(coords, point.value), record)


# Key every data point by its distance to the query point.
rdd2 = rdd.map(map_line).map(map_to_distance)

# FIX: the original did sortByKey().collect()[0:K], which fully sorts the
# RDD and ships *every* record to the driver just to keep K of them.
# takeOrdered(K) returns the K smallest (dist, pt) tuples directly.
nearest = rdd2.takeOrdered(K)
rdd3 = sc.parallelize(nearest)

# Majority vote among the K neighbours: count each label with reduceByKey
# (groupByKey-then-len materialises whole groups just to count them),
# then pick the label with the highest count.
pt = (rdd3.map(lambda x: (x[1][1], 1))
          .reduceByKey(lambda a, b: a + b)
          .map(lambda kv: (kv[1], kv[0]))
          .sortByKey(ascending=False)
          .take(1))
print("The point {} is labeled {}".format(point.value, pt[0][1]))
# for pt in rdd2.collect()[0:K]:
#     print(pt)


#
# data = sc.broadcast([1,2,3,4,5,6,7])
#
# rdd = sc.parallelize((1,2,3,4,5,6))
#
#
# def map_sum(d):
#     # global data
#
#     # data += d
#     print(data.value)
#     return d
#
# rdd2 = rdd.map(map_sum)
#
# print(data)

