from pyspark import SparkConf, SparkContext
from sklearn.neighbors import NearestNeighbors
from sklearn import datasets
import numpy as np

# Spark environment: local mode with 4 worker threads.
conf = SparkConf().setMaster("local[4]").setAppName("test")
sc = SparkContext(conf=conf)

# Training data set: the iris samples (150 rows, 4 features each).
iris = datasets.load_iris()

# Test queries: each element is a 2-D array-like of shape (1, 4), the
# input shape NearestNeighbors.kneighbors expects for a single query.
test_data = [[[4.9, 3. , 1.4, 0.8]],[[7.2, 3. , 1.4, 0.8]],[[4.9, 3.6 , 1.4, 0.8]]]

# Distribute the queries across 4 partitions; cache because the RDD is
# the input of the map transformation below.
myvecs = sc.parallelize(test_data, 4)
myvecs.cache()

# Build the kNN index locally, then broadcast it so every executor
# shares one read-only copy instead of re-serializing it per task.
# NOTE: NearestNeighbors is unsupervised — the original passed
# iris.target as y, which fit() silently ignores, so it is dropped.
knnobj = NearestNeighbors().fit(iris.data)
bc_knnobj = sc.broadcast(knnobj)

# Get neighbors for each point, distributedly. kneighbors returns a
# (distances, indices) pair of arrays for each query point.
results = myvecs.map(lambda x: bc_knnobj.value.kneighbors(x))

print(results.collect())

# Release the SparkContext's resources.
sc.stop()
