package com.scala.sparkML
import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.sql.SparkSession
object KmeansTest {

    /** One product record parsed from a CSV line: id, company name, business direction, free-text description. */
    case class Product(id: String, companyName: String, direction: String, productInfo: String)

    /**
     * Reads product records from the local file "dataFrametext", builds TF-IDF
     * feature vectors from the product descriptions, clusters them with k-means
     * (k = 4) and prints the rows with their predicted cluster index.
     */
    def main(args: Array[String]): Unit = {
        val spark = SparkSession
            .builder()
            .appName("TFIDF@2")
            .master("local")
            //.config("spark.sql.warehouse.dir", "C:\\Users\\Administrator\\Desktop\\spark-warehouse")
            .getOrCreate()

        // Import Spark's implicit conversions (needed for .toDS()).
        import spark.implicits._

        try {
            // Parse each CSV line into a Product. Malformed lines (fewer than
            // 4 fields) are dropped instead of crashing the job with an
            // ArrayIndexOutOfBoundsException.
            val products = spark.sparkContext.textFile("dataFrametext")
                .map(_.split(","))
                .filter(_.length >= 4)
                .map(d => Product(d(0), d(1), d(2), d(3)))
                .toDS()
                .cache()

            // Split productInfo into lowercased, whitespace-separated tokens.
            val productData = new Tokenizer()
                .setInputCol("productInfo")
                .setOutputCol("productWords")
                .transform(products)

            // Hash the token lists into fixed-size term-frequency vectors.
            // NOTE(review): 20 features is tiny and will cause heavy hash
            // collisions; consider a much larger value (e.g. 1 << 18) for real data.
            val tfData = new HashingTF()
                .setNumFeatures(20)
                .setInputCol("productWords")
                .setOutputCol("productFeatures")
                .transform(productData)

            // Fit IDF on the TF vectors and rescale them into the final "features" column.
            val idfModel = new IDF()
                .setInputCol("productFeatures")
                .setOutputCol("features")
                .fit(tfData)
            val idfData = idfModel.transform(tfData)

            // Fixed: the column was selected as "companyname"; use the exact
            // casing of the case-class field so the job also works when
            // spark.sql.caseSensitive=true.
            val trainingData = idfData.select("id", "companyName", "features")

            val kmeans = new KMeans()
                .setK(4)
                .setMaxIter(5)   // maximum number of iterations
                .setSeed(1L)     // fixed seed so clustering results are reproducible
                .setFeaturesCol("features")
                .setPredictionCol("prediction")
            val kmeansModel = kmeans.fit(trainingData)

            // Attach the predicted cluster index to each row and display the result.
            val kmeansData = kmeansModel.transform(trainingData)
            kmeansData.show()
        } finally {
            // Always release the local Spark context and its resources.
            spark.stop()
        }
    }

}
