package com.itcast.spark.baseCluster

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.evaluation.ClusteringEvaluator
import org.apache.spark.ml.feature.{MinMaxScaler, VectorAssembler}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * KMeans clustering demo on the iris dataset: loads the CSV, assembles the four
 * numeric measurements into a feature vector, scales them to [0, 1], fits a
 * 3-cluster KMeans model, then reports WSSSE, cluster centers, and silhouette.
 */
object _01KMeansTest {

  /**
   * Entry point: load iris data, assemble + scale features, fit KMeans (k=3),
   * and print WSSSE, cluster centers, and the silhouette score.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    //1- Set up the Spark environment (local mode, all cores)
    val conf: SparkConf = new SparkConf().setAppName("_01KMeansTest").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    //2- Load the data source (header row present; let Spark infer column types)
    val dataDF: DataFrame = spark.read.format("csv")
      .option("header", true)
      .option("inferSchema", true) // option keys are case-insensitive, but use the documented casing
      .load("./datasets/mldata/iris.csv")
    //3- Drop the label column: clustering is unsupervised
    val usedDF: DataFrame = dataDF.drop("class")
    usedDF.printSchema()
    usedDF.show()
    // Assemble the four numeric iris measurements into a single feature vector
    val vectorAssembler: VectorAssembler = new VectorAssembler()
      .setInputCols(Array("sepal_length", "sepal_width", "petal_length", "petal_width"))
      .setOutputCol("features")
    val vecDF: DataFrame = vectorAssembler.transform(usedDF)
    // Normalize each feature to [0, 1] with MinMaxScaler so no single dimension
    // dominates the euclidean distance used by KMeans
    val scaler: MinMaxScaler = new MinMaxScaler().setInputCol("features").setOutputCol("minMaxfeatures")
    val scalerDF: DataFrame = scaler.fit(vecDF).transform(vecDF)
    //4- Configure the KMeans estimator
    val means: KMeans = new KMeans()
      .setK(3) // iris has 3 species, so cluster into 3 groups
      .setInitMode("k-means||") // alternatives: "random", or "k-means||" (scalable k-means++)
      .setFeaturesCol("minMaxfeatures")
      .setPredictionCol("prediction")
      .setDistanceMeasure("euclidean")
    // .setSeed(...) would make the random selection of initial centers reproducible
    //5- Train the model
    val meansModel: KMeansModel = means.fit(scalerDF)
    //6- Predict cluster assignments
    val pred: DataFrame = meansModel.transform(scalerDF)
    pred.show()
    // Within Set Sum of Squared Errors — lower means tighter clusters.
    // NOTE: computeCost is deprecated since Spark 3.0; meansModel.summary.trainingCost
    // is the forward-compatible replacement.
    val WSSSE: Double = meansModel.computeCost(scalerDF)
    // FIX: was println("WSSSE:", WSSSE), which printed a tuple "(WSSSE:,78.9...)"
    println(s"WSSSE: $WSSSE")
    println("Cluster centers:")
    meansModel.clusterCenters.foreach(println(_))
    // Iris has 4 features, so each cluster center is a 4-dimensional vector, e.g.:
    // [5.88360655737705,2.7409836065573776,4.388524590163936,1.4344262295081969]
    // Evaluate clustering with the silhouette score (closer to 1 is better separated).
    // FIX: the evaluator must score the SAME columns the model was trained on;
    // its default featuresCol is "features", which would evaluate the unscaled vectors.
    val evaluator = new ClusteringEvaluator()
      .setFeaturesCol("minMaxfeatures")
      .setPredictionCol("prediction")
    val silhouette = evaluator.evaluate(pred)
    println(s"Silhouette with squared euclidean distance = $silhouette")
    //7- Release local Spark resources (was missing)
    spark.stop()
  }
}
