package com.fudian.spark_platform.Clustering

import com.fudian.spark_platform.MLClusteringConf
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.feature.StringIndexer
import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.sql.types.{StructField, _}
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.functions._
import scala.collection.mutable


class ALSClustering(conf: MLClusteringConf) {

    /**
     * Trains an implicit-feedback ALS collaborative-filtering model on student
     * course records and returns a DataFrame carrying, for each (student, course)
     * pair of the hold-out split:
     *   - define_recommend_student: top-5 course recommendations for the student
     *   - define_recommend_course : top-5 student recommendations for the course
     *   - define_prediction       : the model's predicted implicit rating
     *
     * Modeling assumption: the less time a student spends on a course while still
     * scoring high, the better the course fits — rating = (record1 + record2) / totalTime * 100.
     *
     * @return joined predictions plus cast recommendation columns (schema is
     *         printed to stdout for debugging)
     */
    def clustering(): DataFrame = {

        // Obtain the shared Spark session.
        val spark = conf.spark
        //spark.sparkContext.setLogLevel("ERROR")
        // ALS with a high iteration count needs a checkpoint dir so the RDD
        // lineage can be truncated instead of growing unboundedly.
        spark.sparkContext.setCheckpointDir("checkpoint")

        val mLUtils = conf.mLUtils

        // Load the student answer-record table; keep rows where at least one of
        // the two scores is non-zero.
        var userRecord = conf.dataF
        userRecord = userRecord.filter(" record1 != '0' or record2 != '0'")

        // Aggregate multiple attempts per (course, student) pair: average the
        // scores and time spent, keep the latest start time.
        userRecord = userRecord.groupBy("courseId", "stdNo").agg(
            Map(
                "courseId" -> "max",
                "stdNo" -> "max",
                "totalTime" -> "avg",
                "record1" -> "avg",
                "record2" -> "avg",
                "startTime" -> "max"
            )
        )

        // Drop the grouping keys and rename the aggregate columns positionally.
        // NOTE(review): this relies on the column order produced by agg(Map(...)),
        // which the API does not guarantee — safer to rename columns by name.
        userRecord = userRecord.drop("courseId").drop("stdNo").toDF("startTime", "courseId", "record2", "record1", "stdNo", "totalTime")

        // Derive the implicit rating per (course, student) pair.
        // NOTE(review): totalTime == 0 yields Infinity/NaN before toInt — confirm
        // upstream guarantees a positive average time.
        val rddInfo = userRecord.rdd.map { row =>
            val record2 = row.get(2).asInstanceOf[Double]
            val record1 = row.get(3).asInstanceOf[Double]
            val totalTime = row.get(5).asInstanceOf[Double]
            val record = (record1 + record2) / totalTime
            // The duplicated id columns (indexTo*) are consumed and dropped by
            // the StringIndexer stages below, leaving the originals intact.
            Row(row.get(1), row.get(4), row.get(1), row.get(4), (record * 100).toInt)
        }

        // Schema for the derived rating rows: all string columns except the
        // integer rating.
        val schemaString = "courseId stdNo indexToCourseId indexToStdNo rating"
        val fields = schemaString.split(" ")
            .map { fieldName =>
                if (fieldName != "rating") {
                    StructField(fieldName, StringType, nullable = true)
                } else {
                    StructField(fieldName, IntegerType, nullable = true)
                }
            }
        val schema = StructType(fields)

        var unionDF = spark.createDataFrame(rddInfo, schema)

        // Cache: the frame is scanned repeatedly by the two StringIndexer fits
        // and the ALS training below.
        unionDF.cache()

        // ALS requires numeric user/item ids: index course ids and student ids.
        val indexer = new StringIndexer()
        unionDF = indexer.setInputCol("indexToCourseId").setOutputCol("indicesCid").fit(unionDF).transform(unionDF).drop("indexToCourseId")
        unionDF = indexer.setInputCol("indexToStdNo").setOutputCol("indicesSid").fit(unionDF).transform(unionDF).drop("indexToStdNo")

        // Discard outlier ratings (>= 100) before splitting.
        unionDF = unionDF.filter(" rating < 100 ")
        // 90/10 split into training and test sets.
        val Array(training, testing) = unionDF.randomSplit(Array(0.9, 0.1))

        // Implicit-feedback ALS over (student, course, rating) triples.
        val als = new ALS()
            .setRank(80)
            .setMaxIter(50)
            .setRegParam(0.01)
            .setImplicitPrefs(true)
            .setUserCol("indicesSid")
            .setItemCol("indicesCid")
            .setRatingCol("rating")

        val model = als.fit(training)

        // Drop NaN predictions for users/items unseen during training so the
        // RMSE evaluation below is not poisoned by NaN.
        model.setColdStartStrategy("drop")

        val predictions = model.transform(testing)

        // Evaluate on the hold-out split and report the metric (previously the
        // RMSE was computed but never used anywhere).
        val evaluator = new RegressionEvaluator()
            .setMetricName("rmse")
            .setLabelCol("rating")
            .setPredictionCol("prediction")
        val rmse = evaluator.evaluate(predictions)
        println(s"ALS RMSE on the test split = $rmse")

        // Top-5 course recommendations for every student ...
        val userRecs = model.recommendForAllUsers(5)
        val reUser = userRecs.withColumnRenamed("recommendations", "to_student_recommend")
        // ... and top-5 student recommendations for every course.
        val movieRecs = model.recommendForAllItems(5)
        val reMoive = movieRecs.withColumnRenamed("recommendations", "to_course_recommend")

        // Join the test predictions with both recommendation tables.
        val results = predictions.join(reUser, "indicesSid").join(reMoive, "indicesCid")
        val schema_results = mLUtils.getResultsDataStructType("indicesCid:Int rating:Double")
        val dataType_results_studcent = ArrayType(schema_results)
        val dataType_results_course = ArrayType(mLUtils.getResultsDataStructType("indicesSid:Int rating:Double"))
        // BUG FIX: define_recommend_course previously cast to_student_recommend
        // (copy-paste error), so the course-recommendation column carried the
        // per-student data; the positional struct cast hid the mistake.
        val new_results = results.withColumn("define_recommend_student", results.col("to_student_recommend").cast(dataType_results_studcent))
            .withColumn("define_recommend_course", results.col("to_course_recommend").cast(dataType_results_course))
            .withColumn("define_prediction", results.col("prediction").cast(DoubleType))
            .drop("to_student_recommend")
            .drop("to_course_recommend").drop("prediction")
        // printSchema() already writes to stdout and returns Unit — wrapping it
        // in print() only emitted a spurious "()".
        new_results.printSchema()
        new_results
    }
}
