package cn.itcast.czxy.BD18

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.{MinMaxScaler, MinMaxScalerModel}
import org.apache.spark.sql.{DataFrame, SparkSession}

/*
* Clusters the iris flower dataset using k-means.
* */
/**
 * Entry point: clusters the iris dataset with k-means.
 *
 * Pipeline: load libsvm data -> min-max scale features to [0, 1] ->
 * k-means (k = 3) -> show a label-vs-cluster cross tabulation.
 *
 * Usage: the first CLI argument, when present, is the input path;
 * otherwise the original hard-coded path is used (backward compatible).
 */
object Lris {
  def main(args: Array[String]): Unit = {
    // Create the SparkSession in local mode, using all available cores.
    val spark: SparkSession = SparkSession.builder().appName("Lris").master("local[*]").getOrCreate()

    // Input path: first CLI argument if provided, else the original default.
    val dataPath: String = args.headOption.getOrElse(
      "file:///E:\\教学\\学习资料4\\机器学习\\03挖掘型标签\\03挖掘型标签\\数据集\\iris_kmeans.txt")

    // Read libsvm-formatted data; yields columns `label` and `features`
    // (a 4-dimensional sparse vector per row).
    val irislibsvmDF: DataFrame = spark.read.format("libsvm").load(dataPath)
//    irislibsvmDF.show(false)
    /*
    +-----+-------------------------------+
    |label|features                       |
    +-----+-------------------------------+
    |1.0  |(4,[0,1,2,3],[5.1,3.5,1.4,0.2])|
    |1.0  |(4,[0,1,2,3],[4.9,3.0,1.4,0.2])|
    |1.0  |(4,[0,1,2,3],[4.7,3.2,1.3,0.2])|
    |1.0  |(4,[0,1,2,3],[4.6,3.1,1.5,0.2])|
    */

    // Min-max normalization: rescales each feature to [0, 1] via
    //   x' = (x - min) / (max - min)
    // so no single feature dominates the Euclidean distance used by k-means.
    val scaler: MinMaxScalerModel = new MinMaxScaler()
      // Column to normalize.
      .setInputCol("features")
      // Column name for the normalized output.
      .setOutputCol("featuresOut")
      // Fit learns the per-feature min/max from the data.
      .fit(irislibsvmDF)

    val scalerDF: DataFrame = scaler.transform(irislibsvmDF)
    /*
     +-----+-------------------------------+---------------------------------------------------------------------------------+
     |label|features                       |featuresOut                                                                      |
     +-----+-------------------------------+---------------------------------------------------------------------------------+
     |1.0  |(4,[0,1,2,3],[5.1,3.5,1.4,0.2])|[0.22222222222222213,0.6249999999999999,0.06779661016949151,0.04166666666666667] |
     |1.0  |(4,[0,1,2,3],[4.9,3.0,1.4,0.2])|[0.1666666666666668,0.41666666666666663,0.06779661016949151,0.04166666666666667] |
     |1.0  |(4,[0,1,2,3],[4.7,3.2,1.3,0.2])|[0.11111111111111119,0.5,0.05084745762711865,0.04166666666666667]                |
    */

    // Fit k-means on the normalized features.
    val prediction: KMeansModel = new KMeans()
      .setK(3) // number of clusters (iris has 3 species)
      .setMaxIter(20) // maximum number of iterations
      .setFeaturesCol("featuresOut") // feature column to cluster on
      .setPredictionCol("PredictionValue") // output column for the assigned cluster
      .setSeed(10) // fixed seed for reproducible centroids
      .fit(scalerDF)

    val predictionDF = prediction.transform(scalerDF)
    // Cross-tabulate true label vs predicted cluster to eyeball clustering quality.
    predictionDF.groupBy("label", "PredictionValue").count().show()

    // Fix: release Spark resources; the original never stopped the session.
    spark.stop()
  }
}
