package com.fudian.spark_platform.Clustering

import com.fudian.spark_platform.DBConnector.MongoConnector
import com.fudian.spark_platform.MLUtils._
import com.mongodb.spark.rdd.DocumentRDDFunctions
import org.bson.Document



object FX168Clustering {

    /** Entry point: loads crawled FX168 pages from MongoDB, runs ANSJ word
      * segmentation over the page HTML, filters for illegal-promotion terms,
      * prints top-10 statistics plus the total hit count, and writes the
      * matching (words, url) records back to MongoDB.
      *
      * @param args optional; args(0) is passed to settingsConf to override the
      *             default configuration.
      */
    def main(args: Array[String]): Unit = {
        // Load the custom ANSJ dictionary here so it is loaded exactly once,
        // on the driver, before any RDD work starts.
        loadANSJDict()
        setDefaultConf()

        if (args.length > 0) {
            settingsConf(args(0))
        }
        // Initialise the MongoDB connector.
        val mongoConnector = new MongoConnector()
        mongoConnector.setLogLevel("ERROR")
        // Load the source collection as an RDD of BSON documents.
        val mongoData = mongoConnector.getMongoLoad()
        // Cache: the data is traversed more than once below.
        mongoData.cache()
        // Keep only the fields we need: (html, title, url).
        val waitRdd = mongoData.map(data =>
            (data.getString("html"), data.getString("title"), data.getString("url"))
        )

        // Per-page pipeline: ANSJ segmentation -> tokenize (filtering / feature
        // transform) -> isFanfa (keep only flagged terms), paired with the page
        // URL. Pages with no remaining flagged terms are dropped.
        val document = waitRdd
            .map(data => (data._1, data._3))
            .map { case (html, url) => (ansjCut(html), url) }
            .map { case (words, url) => (tokenize(words), url) }
            .map { case (tokens, url) => (isFanfa(tokens), url) }
            .filter(_._1.nonEmpty)
            .cache()

        // Top 10 pages by number of flagged terms. (The original bound this
        // Unit-valued foreach chain to an unused val; just execute it.)
        document.map(data => (data._1.length, (data._1, data._2)))
            .sortBy(_._1, ascending = false)
            .take(10)
            .foreach(println)
        // Top 10 flagged terms by global occurrence count.
        document.flatMap(_._1)
            .map((_, 1))
            .reduceByKey(_ + _)
            .sortBy(_._2, ascending = false)
            .take(10)
            .foreach(println)
        // Total number of flagged pages.
        val totalPage = document.count()
        print("所有非法宣传页面的数量为: " + totalPage.toString)

        // Convert to RDD[Document] for the MongoDB writer. Building the
        // Document with append (instead of parsing a hand-concatenated JSON
        // string) stays correct even when the extracted words or the URL
        // contain quotes or backslashes.
        val documentD = document.map { case (words, url) =>
            // mkString with a trailing separator reproduces the original
            // space-terminated word-list format ("w1 w2 w3 ").
            val wordsStr = words.mkString("", " ", " ")
            new Document().append("words", wordsStr).append("url", url)
        }
        DocumentRDDFunctions.apply(documentD).saveToMongoDB()
    }

}
