import org.apache.spark.SparkContext
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.{MinMaxScaler, MinMaxScalerModel}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Classifies the Iris data set with k-means clustering.
  *
  * Pipeline: load libsvm data -> normalise features into [0, 1] with MinMaxScaler
  * -> cluster with KMeans (k = 3) -> show predictions.
  * The input path may be overridden with the first command-line argument.
  */
object test2 {

  /** Default input path, used when no program argument is supplied (backward compatible). */
  private val DefaultDataPath: String =
    "file:///D:\\大数据\\学期文档\\项目\\03挖掘型标签\\数据集\\iris_kmeans.txt"

  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession (local mode, all available cores).
    val spark: SparkSession = SparkSession.builder().appName("lris").master("local[*]").getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    try {
      // 2. Read the libsvm-formatted data; yields columns `label` and `features`.
      //    An optional first argument overrides the default file location.
      val irisLibSvmDF: DataFrame = spark.read.format("libsvm")
        .load(args.headOption.getOrElse(DefaultDataPath))
      // irisLibSvmDF.show(false)
      //+-----+-------------------------------+
      //|label|features                       |
      //+-----+-------------------------------+
      //|1.0  |(4,[0,1,2,3],[5.1,3.5,1.4,0.2])|
      //|1.0  |(4,[0,1,2,3],[4.9,3.0,1.4,0.2])|

      // 3. Normalise every feature into [0, 1]; k-means converges faster on scaled data
      //    and no single feature dominates the Euclidean distance.
      val scalerModel: MinMaxScalerModel = new MinMaxScaler()
        .setInputCol("features")
        .setOutputCol("featuresOut")
        .fit(irisLibSvmDF)

      val scaledDF: DataFrame = scalerModel.transform(irisLibSvmDF)
      scaledDF.show(false)
      //+-----+-------------------------------+-----------------------------------------------------+
      //|label|features                       |featuresOut                                          |
      //+-----+-------------------------------+-----------------------------------------------------+
      //|1.0  |(4,[0,1,2,3],[5.1,3.5,1.4,0.2])|[0.2222...,0.6249...,0.0677...,0.0416...]            |

      // 4. Fit the k-means model on the normalised features.
      val kMeansModel: KMeansModel = new KMeans()
        .setK(3)                             // number of clusters (iris has 3 species)
        .setMaxIter(10)                      // cap on the number of iterations
        .setFeaturesCol("featuresOut")       // cluster on the normalised column
        .setPredictionCol("PredictionValue") // name of the output prediction column
        .setSeed(10)                         // fixed seed for reproducible runs
        .fit(scaledDF)

      val predictionDF: DataFrame = kMeansModel.transform(scaledDF)
      predictionDF.show(false)
      //+-----+-------------------------------+---------------------------+---------------+
      //|label|features                       |featuresOut                |PredictionValue|
      //+-----+-------------------------------+---------------------------+---------------+
      //|1.0  |(4,[0,1,2,3],[5.1,3.5,1.4,0.2])|[0.2222...,0.0416...]      |0              |

      // Cross-tabulate true labels against predicted clusters to inspect cluster quality:
      // predictionDF.groupBy("label", "PredictionValue").count().show()
    } finally {
      // Always release Spark resources, even when the job fails
      // (the original code leaked the session by never calling stop()).
      spark.stop()
    }
  }

}
