package com.fudian.spark_platform.Clustering

import com.fudian.spark_platform.Utils.{AssertUtils, MLUtils}
import org.apache.spark.ml.clustering.LDA
import org.apache.spark.ml.feature.{CountVectorizer, CountVectorizerModel}
import com.fudian.spark_platform.MLClusteringConf
import AssertUtils._
import scala.collection.mutable
import org.apache.spark.sql.DataFrame

/** One aggregated message: the chat message id and its segmented, stop-word-filtered tokens. */
case class MyRow(
    ChatMessageId: Option[String],
    content: Seq[String]
)
/**
 * Human-readable form of one LDA topic: the topic index, the vocabulary
 * indices of its top terms, those terms spelled out, and their weights.
 */
case class DisplayRow(
    topicIndex: Option[Int],
    termIndices: Seq[Int],
    indexMeaning: Seq[String],
    termWeights: Seq[Double]
)

/**
 * Clusters WeChat text messages into topics with CountVectorizer + LDA.
 *
 * Pipeline: read rows from `conf.dataF`, keep non-empty text messages,
 * strip non-Chinese characters, segment with jieba, remove stop words,
 * aggregate tokens per message id, vectorize term counts and fit an
 * EM-optimized LDA model.
 *
 * @param conf project configuration carrying the SparkSession, the input
 *             DataFrame and the ML parameters
 */
class CountVectorLDA(conf: MLClusteringConf) extends Serializable {

    val mLUtils: MLUtils = conf.mLUtils

    // Matches runs of CJK unified ideographs (U+4E00..U+9FA5); used to keep
    // only the Chinese portion of each message.
    val CHregex = "[\\u4e00-\\u9fa5]*".r

    /**
     * Runs the full clustering pipeline.
     *
     * Side effects: sets the Spark log level and checkpoint directory, and
     * prints the model's log-likelihood/perplexity plus the topic summary.
     *
     * @return the vectorized input with a "topicDistribution" column added
     *         by the fitted LDA model
     */
    def clustering(): DataFrame = {
        import scala.util.Try

        // Spark session and housekeeping.
        val spark = conf.spark
        import spark.implicits._
        spark.sparkContext.setLogLevel("ERROR")
        spark.sparkContext.setCheckpointDir("checkpoint")

        val mlConf = conf.mLConfig

        // Number of topics: honour the "LDA" -> "k" config entry when it is
        // present and numeric; fall back to the previous hard-coded 5.
        val topicCount = mlConf.mLConfig("LDA")
            .asInstanceOf[mutable.Map[String, Any]]
            .get("k")
            .flatMap(v => Try(v.toString.trim.toInt).toOption)
            .getOrElse(5)

        // Read WeChat message content.
        var weChatData = conf.dataF

        // Drop non-text and empty rows.
        // BUG FIX: the source column is misspelled "MsgTyoe"; once it has been
        // renamed to "MsgType" the filter must reference the NEW name,
        // otherwise Spark fails with an unresolved-column error.
        weChatData = weChatData.withColumnRenamed("MsgTyoe", "MsgType").
            filter(" MsgType = 'text' ").
            filter(" content != '' ").
            select("ID", "content")

        // Strip special characters, segment, drop stop words, and merge all
        // fragments belonging to the same message id.
        weChatData = weChatData.rdd.filter(data => {
            mLUtils.isFilter(data.get(1).toString)
        }).map(data => {
            // Keep only the Chinese characters; reuse the shared CHregex
            // instead of recompiling the identical pattern per record.
            (data.get(0), CHregex.findAllMatchIn(data.get(1).toString).mkString)
        }).map(data => {
            // jieba: Chinese word segmentation (in scope via AssertUtils._).
            (data._1.toString, jieba(data._2))
        }).reduceByKey((a, b) => a + " " + b).map(data => {
            MyRow(
                Some(data._1),
                mLUtils.normalZhStop(data._2.toString.split(" ").toSeq)
            )
        }).toDF()

        // Term-count vectorization; minDF=2 drops terms that appear in fewer
        // than two documents.
        val cvModel: CountVectorizerModel = new CountVectorizer()
            .setInputCol("content")
            .setOutputCol("features")
            .setMinDF(2)
            .fit(weChatData)
        weChatData = cvModel.transform(weChatData)
        // vocabulary(i) is the term behind feature index i.
        val indexMap = cvModel.vocabulary

        // Fit the LDA topic model.
        val model = new LDA().
            setK(topicCount). // number of topics (from config, default 5)
            setMaxIter(50). // more iterations => tighter convergence
            setSeed(5L). // fixed seed for reproducibility
            setCheckpointInterval(10). // checkpoint to bound RDD lineage
            setOptimizer("em"). // expectation-maximization optimizer
            fit(weChatData)

        val ll = model.logLikelihood(weChatData)
        val lp = model.logPerplexity(weChatData)
        println(s"The lower bound on the log likelihood of the entire corpus: $ll")
        println(s"The upper bound on perplexity: $lp")

        // Summarize each topic by its 5 top-weighted terms and translate the
        // term indices back into vocabulary words.
        val topics = model.describeTopics(5)
        println("The topics described by their top-weighted terms:")
        val topicsDF = topics.rdd.map(data => {
            val meansArr = data.get(1).asInstanceOf[Seq[Int]].map(indexMap(_))
            (data.get(0), data.get(1), meansArr, data.get(2))
        }).map(
            data => {
                DisplayRow(
                    Option(data._1.asInstanceOf[Int]),
                    data._2.asInstanceOf[Seq[Int]],
                    data._3.asInstanceOf[Seq[String]],
                    data._4.asInstanceOf[Seq[Double]]
                )
            }
        ).toDF()

        topicsDF.show(false)

        // Attach the per-document topic distribution and return it.
        val transformed = model.transform(weChatData)
        transformed.select("ChatMessageId", "topicDistribution").show(false)
        transformed
    }
}
