package com.spark.ml

import com.mongodb.spark.{MongoSpark}
import org.apache.log4j.{Logger, Level}
import org.apache.spark.mllib.feature.IDF
import org.apache.spark.mllib.feature.HashingTF
import org.apache.spark.mllib.linalg.{Vector, SparseVector}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkContext, SparkConf}
import com.mongodb.spark.config._
import org.apache.spark.mllib.clustering.{LDA}

/**
  * Created by xiaojun on 2017/6/5.
  */
object TF_IDF_LDA {

    // Noise and stop words to filter out; includes junk tokens picked up
    // while crawling (e.g. "ctx", "googleaddattr") in addition to common
    // English stop words.
    val stopwords = Set(
        "the", "a", "an", "of", "or", "in", "for", "by", "on", "but", "is", "not", "with", "as", "was", "if",
        "they", "are", "this", "and", "it", "have", "from", "at", "my", "be", "that", "to", "what", "which", "date",
        "config", "mit", "ctx", "width", "height", "dean", "again", "givendescription", "length", "none", "menu", "ocw", "googleaddattr", "function"
    )

    // Matches only tokens that contain NO digit characters; used below to
    // drop every token that contains a digit.
    val regexNum = "[^0-9]*".r

    /**
      * Split free text into tokens and drop obviously unsuitable ones.
      *
      * @param content the text to tokenize
      * @return lower-cased tokens, with digit-containing tokens, custom
      *         stop words, and single-character tokens removed
      */
    def tokenize(content: String): Seq[String] = {
        content.split("\\W+")
            .map(_.toLowerCase)
            .filter(_.length > 1)                        // drop single-character tokens first (cheapest predicate)
            .filter(regexNum.pattern.matcher(_).matches) // drop tokens containing any digit
            .filterNot(stopwords.contains)               // drop custom stop words
            .toSeq
    }

    /**
      * Run the full TF-IDF + LDA clustering pipeline:
      * load documents from MongoDB, compute TF-IDF vectors, write the
      * top-weighted terms per document to the local file system, then fit
      * an LDA topic model and print the top terms of each topic.
      *
      * All parameters have defaults matching the original behavior, so
      * `doTest()` remains a valid zero-argument call.
      *
      * @param numTopics    number of LDA topics to fit
      * @param termsPerDoc  number of top TF-IDF terms kept per document
      * @param termsPerTopic number of top terms printed per LDA topic
      * @param outputDir    directory for the per-document term dump; a
      *                     timestamped subdirectory is created inside it
      */
    def doTest(numTopics: Int = 3,
               termsPerDoc: Int = 20,
               termsPerTopic: Int = 10,
               outputDir: String = "/Users/xiaojun/Desktop/scalaLog"): Unit = {
        // Spark configuration, including the MongoDB connector's input and
        // output URIs. NOTE(review): connection details are still hard-coded
        // to localhost — externalize to config for non-local runs.
        val conf = new SparkConf()
            .setAppName("scalaTest")
            .setMaster("local[*]")
            .set("spark.mongodb.input.uri", "mongodb://127.0.0.1:27017/openCourse.college_html_back")
            .set("spark.mongodb.output.uri", "mongodb://127.0.0.1:27017/openCourse.college_html_out")
        val sc = new SparkContext(conf)
        // Quiet the console: only log errors.
        Logger.getRootLogger.setLevel(Level.ERROR)

        // Load the source collection from MongoDB and cache it, since two
        // derived RDDs (titles and documents) are built from it.
        val readConfig = ReadConfig(Map("collection" -> "college_html_back", "readPreference.name" -> "secondaryPreferred"), Some(ReadConfig(sc)))
        val mongoData = MongoSpark.load(sc, readConfig = readConfig)
        mongoData.cache()

        // Split each Mongo document into (title, html-text) pairs.
        val pairs = mongoData.map(doc => (doc.getString("title"), doc.getString("html")))
        val titles = pairs.map(_._1)
        // Tokenized body text, one Seq[String] per document.
        val documents = pairs.map(_._2).map(tokenize)

        // HashingTF maps each word to a fixed index via hashing; build a
        // reverse index -> word map so results can be printed as words.
        val hashingTF = new HashingTF()
        val indexToWord = documents.flatMap(identity)
            .map(word => (hashingTF.indexOf(word), word))
            .collect
            .toMap
        val tf = hashingTF.transform(documents)
        // Broadcast the lookup map so every executor can resolve indices.
        val bcWords = tf.context.broadcast(indexToWord)
        tf.cache() // reused by IDF.fit and IDF.transform

        // minDocFreq = 2: only terms appearing in at least two documents
        // contribute to the IDF model.
        val idf = new IDF(minDocFreq = 2).fit(tf)
        val tfIdf: RDD[Vector] = idf.transform(tf)

        // Top-weighted terms per document. Using toSparse instead of a
        // partial `case SparseVector(...)` match avoids a MatchError if a
        // dense vector ever appears in the RDD.
        val topTermsPerDoc = tfIdf.map { vec =>
            val sv = vec.toSparse
            val words = sv.indices.map(index => bcWords.value.getOrElse(index, "null"))
            words.zip(sv.values).sortBy(-_._2).take(termsPerDoc).toSeq
        }
        // Pair each title with its top terms and dump to the local FS.
        titles.zip(topTermsPerDoc).saveAsTextFile(outputDir + "/mongo_yingwen_" + System.currentTimeMillis)

        // LDA expects (documentId, termCountsVector) pairs.
        val corpus = tfIdf.zipWithIndex().map { case (vector, docId) => (docId, vector) }
        val ldaModel = new LDA()
            .setK(numTopics)             // number of topics
            .setDocConcentration(10)     // document-topic prior (alpha)
            .setTopicConcentration(10)   // topic-word prior (beta)
            .setMaxIterations(100)       // more iterations -> better convergence
            .setSeed(0L)                 // fixed seed for reproducibility
            .setCheckpointInterval(10)
            .setOptimizer("em")          // expectation-maximization optimizer
            .run(corpus)

        // Print the top terms of each topic. Zipping the parallel arrays
        // (instead of indexing with a hard-coded Range(0, 10)) avoids both
        // the repeated O(n) .toList conversions and an out-of-bounds access
        // when fewer than `termsPerTopic` terms are returned.
        ldaModel.describeTopics(termsPerTopic).zipWithIndex.foreach {
            case ((termIndices, termWeights), topicId) =>
                println(s"Topic $topicId:")
                termIndices.zip(termWeights).foreach { case (index, weight) =>
                    println(bcWords.value.getOrElse(index, "null") + "   " + weight)
                }
        }

        // Release cached data before shutting down the context.
        tf.unpersist()
        mongoData.unpersist()
        sc.stop()
    }

    /**
      * Entry point.
      *
      * @param args unused command-line arguments
      */
    def main(args: Array[String]): Unit = {
        doTest()
        println("......")
    }
}

