# -*- coding: utf-8 -*-
import sys

from pyspark.mllib.util import MLUtils

sys.path.append("../")
from pyspark.ml.feature import IDF
from pyspark.mllib.feature import HashingTF as MLH
from pyspark.ml.clustering import KMeans
from clustering import Clustering


class KNNClustering(Clustering):
    def __init__(self, ctx, df, params):
        """Set up the clustering job.

        Args:
            ctx: Spark session/context wrapper; also forwarded to the base class.
            df: input DataFrame to cluster (expected to carry a "words" column,
                judging by the feature pipeline in ``clustering``).
            params: job configuration dict; the "clusteringParams" entry is
                kept on ``self.LDAParams`` for later use.
        """
        # Base class receives the raw inputs; the final False flag's meaning
        # is defined by Clustering.__init__ (not visible here).
        super(KNNClustering, self).__init__(ctx, df, params, False)
        # NOTE: self.params is assumed to be populated by the base __init__.
        self.LDAParams = self.params["clusteringParams"]
        self.ctx = ctx

    def clustering(self):
        """Run the clustering pipeline over ``self.df``.

        Currently only dumps the DataFrame schema; the TF-IDF feature
        extraction below is commented out and the KMeans step appears to
        follow (possibly beyond this excerpt).
        """

        # NOTE(review): printSchema() prints to stdout and returns None, so
        # this line also prints a spurious "None" — plain
        # self.df.printSchema() would suffice. Confirm before changing.
        print(self.df.printSchema())
        # --- TF-IDF feature pipeline (disabled) ---
        # mlHashingTF = MLH()
        # # Use hashingTF's indexOf to build the word -> hash-index mapping,
        # # then broadcast that mapping to all worker nodes.
        # mapWordsRdd = self.df.rdd.flatMap(lambda x: x["words"]).map(lambda w: (mlHashingTF.indexOf(w), w))
        # mapList = mapWordsRdd.collect()
        # bdMapList = self.ctx.sparkContext.broadcast(mapList)
        #
        # # Feature transformation: convert each document's words into a
        # # (sparse) term-frequency vector.
        # hashingData = self.df.rdd.map(lambda x: (x, mlHashingTF.transform(x["words"]))) \
        #     .toDF() \
        #     .toDF("words", "featuresOut")
        # MLHashingData = MLUtils.convertVectorColumnsToML(hashingData, "featuresOut")
        #
        # # Apply IDF to down-weight terms that recur across many documents,
        # # making the resulting weights smoother and more representative.
        # idfModel = IDF(2,inputCol="featuresOut",outputCol="features")
        # model = idfModel.fit(MLHashingData)
        # resultsData = model.transform(MLHashingData)

        # KMeans step — presumably continues below this excerpt.
