package com.itcast.spark.baseCluster

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.evaluation.ClusteringEvaluator
import org.apache.spark.ml.feature.{MinMaxScaler, VectorAssembler}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * DESC:
 */
/**
 * Elbow-method helper for choosing K: trains KMeans on the iris features for
 * K in [2, 8], printing the WSSSE (within-set sum of squared errors) and the
 * silhouette score for each K so a suitable cluster count can be picked.
 */
object _02KMeansJKSelector {
  def main(args: Array[String]): Unit = {
    // 1 - Set up the Spark environment (local mode, all cores).
    val conf: SparkConf = new SparkConf().setAppName("_02KMeansJKSelector").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    // 2 - Load the data source (header row present, let Spark infer column types).
    val dataDF: DataFrame = spark.read.format("csv")
      .option("header", true)
      .option("inferSchema", true)
      .load("./datasets/mldata/iris.csv")
    // 3 - Inspect the data; drop the label column since clustering is unsupervised.
    //dataDF.printSchema()
    //dataDF.show(false)
    val usedDF: DataFrame = dataDF.drop("class")
    usedDF.printSchema()
    usedDF.show()
    // Assemble the four numeric measurements into a single feature vector.
    val vectorAssembler: VectorAssembler = new VectorAssembler()
      .setInputCols(Array("sepal_length", "sepal_width", "petal_length", "petal_width"))
      .setOutputCol("features")
    val vecDF: DataFrame = vectorAssembler.transform(usedDF)
    // Normalize each feature to [0, 1] with MinMaxScaler so no dimension dominates
    // the euclidean distance. (WSSSE with scaling: ~7.14 vs ~78.95 without.)
    val scaler: MinMaxScaler = new MinMaxScaler().setInputCol("features").setOutputCol("minMaxfeatures")
    val scalerDF: DataFrame = scaler.fit(vecDF).transform(vecDF)
    // Candidate cluster counts to evaluate.
    val K: Array[Int] = Array(2, 3, 4, 5, 6, 7, 8)
    K.foreach { kiter =>
      // 4 - Configure the clustering algorithm for this K.
      val means: KMeans = new KMeans()
        .setK(kiter)
        .setInitMode("k-means||") // alternatives: "random" (k-means|| is the scalable k-means++ init)
        .setFeaturesCol("minMaxfeatures")
        .setPredictionCol("prediction")
        .setDistanceMeasure("euclidean")
      // .setSeed(...) would make the random center initialization reproducible.
      // 5 - Train the model.
      val meansModel: KMeansModel = means.fit(scalerDF)
      // 6 - Predict cluster assignments for every row.
      val pred: DataFrame = meansModel.transform(scalerDF)
      // 7 - Report WSSSE (lower is better; look for the "elbow" as K grows).
      // NOTE(review): computeCost is deprecated since Spark 2.4 — on 2.4+ prefer
      // meansModel.summary.trainingCost (removed entirely in Spark 3.0).
      val WSSSE: Double = meansModel.computeCost(scalerDF)
      // 8 - Silhouette score from the predictions (closer to 1 is better).
      val silhouette: Double = new ClusteringEvaluator()
        .setFeaturesCol("minMaxfeatures")
        .setPredictionCol("prediction")
        .evaluate(pred)
      println(s"kiter is: $kiter, WSSSE is: $WSSSE, silhouette is: $silhouette")
      println("=======" * 10)
    }
    spark.stop()
  }
}
