package com.spark.ml

import java.nio.file.FileSystems
import java.util.Base64

import com.huaban.analysis.jieba.{WordDictionary, JiebaSegmenter}
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import org.apache.commons.lang.StringUtils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.sql.types.{ArrayType, StringType, StructField, StructType}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import scala.util.parsing.json.JSON
import org.apache.spark.ml.feature.StopWordsRemover
import scala.io.Source

/**
  * Copyright FuMan AI DataDimming.co SHANGHAI,RPC
  * File can't be used to License
  * Created by xiaoJun on 2017/6/20.
  */
class MLBase {

    // Jieba segmenter instance, reused for every call to jieba()
    val jiebaOp = new JiebaSegmenter()

    // Mutable configuration, overridable through setConf()/settingsConf()
    var MongoUri = "mongodb://127.0.0.1:27017/openCourse.course_html"
    var SparkMaster = "local[*]"
    var AllowNumber = false
    var ChineseEnable = false
    var LimitCount = 1
    var StopOtherWords:Set[String] = Set()
    var OutputWay = "console"

    // Stop-word sets used to drop illegal tokens and noise picked up while crawling
    var ZHStopWords:Set[String] = Set()

    var ENStopWords:Set[String] = Set()

    /**
      * Reads the bundled stop-word lists from disk.
      *
      * @return a pair of (english stop words, chinese stop words)
      */
    def getDefaultStopWords(): (Array[String], Array[String]) = {
        val baseDir = System.getProperty("user.dir")
        // Read a file as a space-joined word list, closing the handle afterwards
        // (the previous version leaked both Source handles).
        def readWords(name: String): Array[String] = {
            val src = Source.fromFile(baseDir + "/src/resources/" + name)
            try src.getLines().mkString(" ").split(" ")
            finally src.close()
        }
        (readWords("stopWords_en.txt"), readWords("stopWords_zh.txt"))
    }

    // Matches tokens that contain no digit at all; used to drop numeric and
    // mixed alphanumeric tokens when AllowNumber is false.
    val regexNum = {
        "[^0-9]*".r
    }

    // Matches tokens consisting only of CJK characters.
    val CHregex = {
        "[\u4e00-\u9fa5]*".r
    }

    // Matches tokens made of ASCII letters, digits or CJK characters.
    // NOTE: the previous pattern had its backslashes double-escaped, so the
    // character class contained a literal backslash and the letters
    // "u4e00"/"u9fa5" instead of the intended CJK range, silently rejecting
    // every Chinese token in tokenize(). Fixed to a real unicode range,
    // matching the style of CHregex above.
    val baseRegex = {
        "[a-zA-Z0-9\u4e00-\u9fa5]*".r
    }

    /**
      * Loads the user-defined dictionary ("../dic.txt") into jieba.
      */
    def loadDict(): Unit = {
        val path = FileSystems.getDefault.getPath("../", "dic.txt")
        WordDictionary.getInstance().loadUserDict(path)
    }


    /**
      * Applies a configuration passed as a Base64-encoded JSON object.
      *
      * @param conf Base64 string whose decoded payload is a JSON map
      */
    def settingsConf(conf: String) {
        // Decode Base64, then parse the resulting JSON string
        val jsonStr = new String(Base64.getDecoder.decode(conf))
        JSON.parseFull(jsonStr) match {
            // @unchecked: the map's type parameters are erased at runtime;
            // JSON.parseFull only ever produces String keys here
            case Some(map: Map[String, Any] @unchecked) => setConf(map)
            case None => println("解析参数配置失败")
            case other => println("非法的参数结构: " + other)
        }
    }

    /**
      * Resets the stop-word configuration to the bundled defaults,
      * picking the Chinese or English set according to ChineseEnable.
      */
    def setDefaultConf(): Unit = {
        val (enList, zhList) = getDefaultStopWords()
        ENStopWords = enList.toSet
        ZHStopWords = zhList.toSet
        StopOtherWords = if (ChineseEnable) ZHStopWords else ENStopWords
    }

    /**
      * Applies the shared configuration (database, tokenizer, output).
      *
      * @param conf configuration map; missing keys keep their current values
      */
    def setConf(conf: Map[String, Any]) = {
        this.AllowNumber = java.lang.Boolean.parseBoolean(conf.getOrElse("AllowNumber", false).toString)
        this.ChineseEnable = java.lang.Boolean.parseBoolean(conf.getOrElse("ChineseEnable", false).toString)
        this.MongoUri = conf.getOrElse("MongoUri", this.MongoUri).toString
        this.LimitCount = java.lang.Integer.parseInt(conf.getOrElse("LimitCount", this.LimitCount).toString)
        // User-supplied extra stop words are merged into the language default set
        val extraStopWords = conf.getOrElse("StopOtherWords", " ").toString.split(" ").toSet
        StopOtherWords = (if (this.ChineseEnable) ZHStopWords else ENStopWords) | extraStopWords
        this.OutputWay = conf.getOrElse("OutputWay", this.OutputWay).toString
        this.SparkMaster = conf.getOrElse("SparkMaster", this.SparkMaster).toString
    }

    /**
      * Loads the configured MongoDB collection as an RDD.
      *
      * @param spark the active SparkContext
      * @return the MongoDB-backed RDD
      */
    def getMongoLoad(spark: SparkContext) = {
        Logger.getRootLogger.setLevel(Level.ERROR)
        val readConfig = ReadConfig(Map("readPreference.name" -> "secondaryPreferred"), Some(ReadConfig(spark)))
        MongoSpark.load(spark, readConfig = readConfig)
    }

    /**
      * Segments Chinese text with jieba.
      *
      * @param content raw text to segment
      * @return the segmented words joined by single spaces, so the result
      *         can be fed straight into tokenize()
      */
    def jieba(content: String): String = {
        val data = jiebaOp.sentenceProcess(content)
        StringUtils.join(data, " ")
    }

    /**
      * Splits a space-separated text into normalized tokens and drops
      * obviously unsuitable ones: stop words, too-short tokens, and —
      * depending on the flags — tokens containing digits or non-CJK tokens.
      *
      * @param content space-separated words (typically jieba() output)
      * @return the filtered token sequence
      */
    def tokenize(content: String): Seq[String] = {
        var tokens = content.split(" ")
            .map(_.toLowerCase.trim())
            .filter(this.baseRegex.pattern.matcher(_).matches)
        if (!this.AllowNumber) {
            // keep only tokens that contain no digits
            tokens = tokens.filter(this.regexNum.pattern.matcher(_).matches)
        }
        if (this.ChineseEnable) {
            // keep only pure-CJK tokens
            tokens = tokens.filter(this.CHregex.pattern.matcher(_).matches)
        }
        tokens.filterNot(StopOtherWords.contains).filter(_.length > this.LimitCount).toSeq
    }

    /**
      * Builds a SparkContext from the current configuration.
      *
      * @return a new SparkContext wired to the configured master and MongoDB URIs
      */
    def getSparkInc(): SparkContext = {
        val conf = new SparkConf()
            .setAppName("scalaTest")
            // Honour the configured master; this was previously hard-coded to
            // "local[*]", which made the SparkMaster setting a silent no-op.
            .setMaster(this.SparkMaster)
            .set("spark.mongodb.input.uri", this.MongoUri)
            .set("spark.mongodb.output.uri", this.MongoUri + "_out")
        new SparkContext(conf)
    }

    /**
      * Runs Spark ML's StopWordsRemover over the given token RDD.
      *
      * @param sparkContext the active SparkContext
      * @param rddWords one token sequence per document
      * @return the filtered "words" Dataset together with the SQLContext used
      */
    def makeStopWordsRemover(sparkContext: SparkContext, rddWords: RDD[Seq[String]]) = {
        val remover = new StopWordsRemover()
        val sparkSql = new SQLContext(sparkContext)

        // Wrap each token sequence into a single-column Row so the RDD can
        // become a DataFrame with an array<string> column named "html".
        val fields = Array(StructField("html", ArrayType(StringType), nullable = true))
        val rowRDD = rddWords.map(attributes => Row(attributes))
        val schema = StructType(fields)
        val rowDF = sparkSql.createDataFrame(rowRDD, schema).toDF()
        remover.setInputCol("html").setOutputCol("words")
        val stopWords = remover.transform(rowDF)
        import sparkSql.implicits._
        val words = stopWords.select("words").as[Array[String]]
        (words, sparkSql)
    }

}
