package com.fudian.spark_platform.Clustering

import com.fudian.spark_platform.MLClusteringConf
import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.ml.feature.{StringIndexer, VectorAssembler}
import org.apache.spark.sql.DataFrame


class KMeansClustering(conf: MLClusteringConf) {

    /**
     * Runs K-means clustering over the user study-behaviour DataFrame held in `conf`.
     *
     * Pipeline: string-index the categorical columns, exclude "测试" (test) records,
     * cast/clean the numeric columns, aggregate per user, assemble a feature vector,
     * then fit a 6-cluster K-means model.
     *
     * Side effects: prints a 20-row preview, the WSSSE, the cluster centers and the
     * per-cluster row counts to stdout.
     *
     * @return the aggregated per-user data with an added `prediction` column (cluster id 0..5)
     */
    def clustering(): DataFrame = {

        // Obtain the shared Spark session and silence noisy logs.
        val spark = conf.spark
        spark.sparkContext.setLogLevel("ERROR")

        val dataset = conf.dataF

        // Fit-and-apply a StringIndexer for one column, then drop the source column.
        // FIX: the original reused a single mutable StringIndexer via repeated
        // setInputCol/setOutputCol calls; a fresh indexer per column avoids that
        // shared-state fragility.
        def indexColumn(df: DataFrame, inputCol: String, outputCol: String): DataFrame =
            new StringIndexer()
                .setInputCol(inputCol)
                .setOutputCol(outputCol)
                .fit(df)
                .transform(df)
                .drop(inputCol)

        // Rename 用户学科 (user subject) to an ASCII name, then index the categorical columns.
        // FIX: the original called .drop(...) on several DataFrames and discarded the
        // result — DataFrames are immutable, so those drops were no-ops; each result
        // is now captured so the intended drops actually take effect.
        val subjectRenamed = dataset
            .withColumn("userSubject", dataset.col("用户学科"))
            .drop("用户学科")
        val subjectIndexed   = indexColumn(subjectRenamed, "userSubject", "indicesSubject")
        val studyAreaIndexed = indexColumn(subjectIndexed, "用户学段", "indicesStudyArea") // 用户学段 = study stage
        val areaIndexed      = indexColumn(studyAreaIndexed, "所在区", "indicesArea")      // 所在区 = district
        val courseIndexed    = indexColumn(areaIndexed, "课程名字", "courseName")          // 课程名字 = course name

        // Only records whose study form (学习形式) is "测试" (test) carry a score,
        // so exclude them before indexing the study-form column.
        val noTest = courseIndexed
            .withColumn("studyType", courseIndexed.col("学习形式"))
            .drop("学习形式")
            .where(" studyType != '测试' ")
        val typeIndexed = indexColumn(noTest, "studyType", "indicesStudyType")

        // Cast the numeric text columns to int.
        // FIX: the original dropped "课件总学习次数" (missing the leading 该), a silent
        // no-op on a non-existent column; the real source column is 该课件总学习次数.
        var indexValData = typeIndexed
            .withColumn("studyTime", typeIndexed.col("学习时长").cast("int"))
            .drop("学习时长")
        indexValData = indexValData
            .withColumn("studyCount", indexValData.col("该课件总学习次数").cast("int"))
            .drop("该课件总学习次数")
        indexValData = indexValData
            .withColumn("teachTime", indexValData.col("用户教龄").cast("int"))
            .drop("用户教龄")

        // Fill missing values: default teaching age of 2 years, zero study time/count.
        indexValData = indexValData.na.fill(value = 2, cols = Array("teachTime"))
        indexValData = indexValData.na.fill(value = 0, cols = Array("studyTime", "studyCount"))

        // Trim outliers (thresholds chosen empirically by the original author).
        indexValData = indexValData
            .filter(" studyTime < 3000 ")
            .filter(" studyCount < 80 ")

        // Merge rows belonging to the same user id (用户id) into one row per user.
        indexValData = indexValData.groupBy("用户id").agg(
            Map(
                "indicesSubject" -> "avg",
                "teachTime" -> "avg",
                "indicesStudyArea" -> "avg",
                "studyTime" -> "sum",
                "studyCount" -> "sum",
                "courseName" -> "max"
            )
        )
        // Rename the auto-generated aggregate columns back to plain names.
        indexValData = indexValData.withColumnRenamed("用户id", "userId")
            .withColumnRenamed("sum(studyTime)", "studyTime")
            .withColumnRenamed("avg(indicesStudyArea)", "indicesStudyArea")
            .withColumnRenamed("avg(indicesSubject)", "indicesSubject")
            .withColumnRenamed("avg(teachTime)", "teachTime")
            .withColumnRenamed("sum(studyCount)", "studyCount")
            .withColumnRenamed("max(courseName)", "courseName")

        // Scale studyTime down (x 0.001) so it does not dominate the feature vector.
        indexValData = indexValData
            .withColumn("studyTimeAvg", indexValData.col("studyTime") * 0.001)
            .drop("studyTime")
            .withColumnRenamed("studyTimeAvg", "studyTime")

        // Assemble the selected feature columns into a single vector column for K-means.
        val assembler = new VectorAssembler()
            .setInputCols(Array("teachTime", "studyTime", "courseName", "studyCount"))
            .setOutputCol("features")
        val output = assembler.transform(indexValData)

        output.show(20, truncate = false)

        // Train a k-means model (k = 6, fixed seed for reproducibility).
        val k = 6
        val kmeans = new KMeans().setK(k).setMaxIter(50).setSeed(1L)
        val model = kmeans.fit(output)

        // Evaluate clustering by computing Within Set Sum of Squared Errors.
        // NOTE(review): computeCost is deprecated since Spark 2.4 and removed in 3.0;
        // migrate to ClusteringEvaluator when upgrading Spark.
        val WSSSE = model.computeCost(output)
        println(s"Within Set Sum of Squared Errors = $WSSSE")

        // Show the trained cluster centers.
        println("Cluster Centers: ")
        model.clusterCenters.foreach(println)

        println("K-means分类结果预测:")
        val modelTrain = model.transform(output)

        println("各分类数量如下:")
        // Per-cluster row counts; the printed labels are preserved byte-for-byte
        // from the original six copy-pasted println calls.
        val labels = Seq("分类一:", "分类二:", "分类三:", "分类四:", "分类五:", "分类六:")
        labels.zipWithIndex.foreach { case (label, i) =>
            println(label + modelTrain.where(s"prediction = $i").count())
        }
        modelTrain
    }

}
