package com.itcast.spark.baseCluster

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.evaluation.ClusteringEvaluator
import org.apache.spark.ml.feature.{MinMaxScaler, VectorAssembler}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * DESC: KMeans clustering demo on the "medium" dataset.
 *
 * Trains a 2-cluster KMeans model on the (weights, ph) columns, then prints
 * the per-row predictions, the WSSSE (within-set sum of squared errors),
 * the learned cluster centers, and the silhouette score.
 */
object _03KMeansMediumTest {
  def main(args: Array[String]): Unit = {
    // 1 - Set up the Spark environment (local mode, all available cores).
    //     App name corrected to match this object (was copied from _01KMeansTest).
    val conf: SparkConf = new SparkConf().setAppName("_03KMeansMediumTest").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2 - Load the data source as CSV with a header row.
    //     Option key corrected to the documented "inferSchema" spelling
    //     (Spark option keys are case-insensitive, so behavior is unchanged).
    val dataDF: DataFrame = spark.read.format("csv")
      .option("header", true)
      .option("inferSchema", true)
      .load("./datasets/mldata/medium.txt")

    // 3 - Assemble the raw numeric columns into a single ML feature vector.
    val vectorAssembler: VectorAssembler = new VectorAssembler()
      .setInputCols(Array("weights", "ph"))
      .setOutputCol("features")
    val vecDF: DataFrame = vectorAssembler.transform(dataDF)

    // 4 - Configure the KMeans estimator: k = 2 clusters, euclidean distance.
    //     Call .setSeed(...) here for reproducible random center initialization.
    val means: KMeans = new KMeans()
      .setK(2)
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
      .setDistanceMeasure("euclidean")

    // 5 - Train the model on the assembled features.
    val meansModel: KMeansModel = means.fit(vecDF)

    // 6 - Predict cluster assignments on the training data.
    val pred: DataFrame = meansModel.transform(vecDF)
    pred.show()

    // WSSSE: summary.trainingCost replaces computeCost, which is deprecated
    // since Spark 2.4 and removed in Spark 3.0. String interpolation fixes the
    // accidental tuple print `(WSSSE:,…)` from `println("WSSSE:", WSSSE)`.
    val WSSSE: Double = meansModel.summary.trainingCost
    println(s"WSSSE: $WSSSE") // e.g. 78.94506582597637

    // Print the learned cluster centers ("聚类中心" = cluster centers).
    println("聚类中心")
    meansModel.clusterCenters.foreach(println)

    // Evaluate clustering by computing the silhouette score:
    // values closer to 1 indicate well-separated clusters.
    val evaluator = new ClusteringEvaluator()
    val silhouette = evaluator.evaluate(pred)
    println(s"Silhouette with squared euclidean distance = $silhouette")
    // Silhouette with squared euclidean distance = 0.8967364744598525

    // Release Spark resources now that the demo is finished.
    spark.stop()
  }
}
