package com.fudian.spark_platform

import java.nio.charset.StandardCharsets
import java.nio.file.Paths
import java.util.Base64

import com.huaban.analysis.jieba.{JiebaSegmenter, WordDictionary}
import com.mongodb.util.JSON
import org.ansj.library.DicLibrary
import org.ansj.splitWord.analysis.DicAnalysis
import org.apache.commons.lang.StringUtils
import org.apache.spark.SparkContext
import org.apache.spark.ml.feature.StopWordsRemover
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{ArrayType, StringType, StructField, StructType}
import org.apache.spark.sql.{Row, SQLContext}

import scala.collection.JavaConverters._
import scala.io.Source


/**
  * Shared mutable configuration and text-segmentation helpers for the Spark
  * jobs in this package. Defaults below can be overridden via `setConf`
  * (Base64/JSON path: `settingsConf`); stop-word sets are populated by
  * `setDefaultConf`.
  */
object MLUtils extends Serializable{

    // Jieba segmenter instance used by `jieba` for Chinese word segmentation.
    var jiebaOp = new JiebaSegmenter()

    // MongoDB connection string (presumably db.collection in the path — confirm with the reader job).
    var MongoUri = "mongodb://127.0.0.1:27017/fx168.forextime"
    // Spark master URL; defaults to local mode with all cores.
    var SparkMaster = "local[*]"
    // When false, `tokenize` drops any token containing an ASCII digit.
    var AllowNumber = false
    // When true, `tokenize` keeps only pure-Chinese tokens and the Chinese
    // stop-word list becomes the active stop-word set.
    var ChineseEnable = false
    // `tokenize` keeps only tokens strictly longer than this.
    var LimitCount = 1
    // Active stop-word set: ZH or EN defaults, merged with caller-supplied words in `setConf`.
    var StopOtherWords:Set[String] = Set()
    // Result output channel (e.g. "console").
    var OutputWay = "console"

    // Spark-related parameters
    var DataSourceType = "mongo"

    var RedisHost = "127.0.0.1"
    var RedisPort = 6379

    // Stop-word sets used to strip crawler noise and stop words.
    var ZHStopWords:Set[String] = Set()

    var ENStopWords:Set[String] = Set()

    var UserStopWords:Set[String] = Set()

    /**
      * Load the default stop-word lists bundled on the classpath.
      *
      * Reads `stopWords_en.txt`, `stopWords_zh.txt` and `user_stop.txt` and
      * splits each into individual words.
      *
      * Fixes over the previous version: each `Source` is closed after reading
      * (they used to leak), the quadratic string-concatenation loop is gone,
      * and the spurious leading empty token produced by the old
      * `" " + line` concatenation is no longer emitted.
      *
      * @return (English stop words, Chinese stop words, user stop words)
      */
    def getDefaultStopWords: (Array[String], Array[String], Array[String]) = {
        // Read one classpath resource and split it into non-empty words.
        def readWords(resourceName: String): Array[String] = {
            val source = Source.fromURL(this.getClass.getClassLoader.getResource(resourceName))
            try source.getLines().flatMap(_.split(" ")).filter(_.nonEmpty).toArray
            finally source.close()
        }
        (readWords("stopWords_en.txt"), readWords("stopWords_zh.txt"), readWords("user_stop.txt"))
    }

    // Keeps tokens that contain no ASCII digits at all. NOTE(review): despite
    // the original "filter pure numbers" comment, this rejects any token with
    // a digit in it, not just all-digit tokens.
    val regexNum = "[^0-9]*".r

    // Matches tokens made up exclusively of CJK unified ideographs.
    val CHregex = "[\\u4e00-\\u9fa5]*".r

    // Matches tokens of ASCII letters, ASCII digits and CJK ideographs only.
    val baseRegex = "[a-zA-Z0-9\\u4e00-\\u9fa5]*".r

    /**
      * Load a user dictionary into the shared jieba `WordDictionary`.
      *
      * @param pathStr classpath resource name of the dictionary; when empty,
      *                the bundled `dic.txt` is used instead
      */
    def loadDict(pathStr:String): Unit = {
        // Fall back to the bundled dictionary when no resource name is given.
        val resourceName = if (pathStr.length > 0) pathStr else "dic.txt"
        val dictPath = Paths.get(this.getClass.getClassLoader.getResource(resourceName).getPath)
        WordDictionary.getInstance().loadUserDict(dictPath)
    }

    /**
      * Register every line of the bundled `dic.txt` as a user-defined term in
      * the ANSJ default dictionary (category "userDefine", frequency 1000).
      */
    def loadANSJDict():Unit = {
        Source.fromURL(this.getClass.getClassLoader.getResource("dic.txt"))
            .getLines()
            .foreach(DicLibrary.insert(DicLibrary.DEFAULT, _, "userDefine", 1000))
    }


    /**
      * Parse a Base64-encoded JSON configuration string and apply it via
      * `setConf`.
      *
      * Fixes: `com.mongodb.util.JSON.parse` returns a plain Java object
      * (a `BasicDBObject`, which implements `java.util.Map`, for a JSON
      * object) — never a Scala `Option` — so the previous `Some`/`None` match
      * could never fire and the configuration was silently dropped. The
      * payload is now also decoded explicitly as UTF-8 instead of the
      * platform-default charset, and the unused `confObj` local is removed.
      *
      * @param conf Base64-encoded UTF-8 JSON object
      */
    def settingsConf(conf: String): Unit = {
        // Base64-decode the payload; the content is a JSON object string.
        val jsonStr = new String(Base64.getDecoder.decode(conf), StandardCharsets.UTF_8)
        JSON.parse(jsonStr) match {
            case m: java.util.Map[_, _] =>
                // Convert the Java map into an immutable Map[String, Any].
                setConf(m.asScala.map { case (k, v) => k.toString -> (v: Any) }.toMap)
            case null =>
                println("解析参数配置失败")
            case other =>
                println("非法的参数结构: " + other)
        }
    }

    /**
      * Populate the three stop-word sets from the bundled resource files and
      * select the active set according to `ChineseEnable`.
      */
    def setDefaultConf(): Unit = {
        val (enWords, zhWords, userWords) = getDefaultStopWords
        ENStopWords = enWords.toSet
        ZHStopWords = zhWords.toSet
        UserStopWords = userWords.toSet
        // The active set follows the configured language mode.
        StopOtherWords = if (ChineseEnable) ZHStopWords else ENStopWords
    }

    /**
      * Apply shared configuration (datasource, Spark, Redis, tokenizer
      * settings) from a key/value map. Missing keys keep their current value.
      *
      * @param conf configuration map; values are rendered via `toString`
      */
    def setConf(conf:Map[String,Any]) = {
        // Look a key up, fall back to `default`, and render it as a String.
        def raw(key: String, default: Any): String = conf.getOrElse(key, default).toString

        this.AllowNumber = java.lang.Boolean.parseBoolean(raw("AllowNumber", false))
        this.ChineseEnable = java.lang.Boolean.parseBoolean(raw("ChineseEnable", false))
        this.MongoUri = raw("MongoUri", this.MongoUri)
        this.LimitCount = java.lang.Integer.parseInt(raw("LimitCount", this.LimitCount))

        // Merge caller-supplied stop words into the language-appropriate default set.
        val extraStops = raw("StopOtherWords", " ").split(" ").toSet
        StopOtherWords = (if (this.ChineseEnable) ZHStopWords else ENStopWords) | extraStops

        this.OutputWay = raw("OutputWay", this.OutputWay)
        this.SparkMaster = raw("SparkMaster", this.SparkMaster)
        this.DataSourceType = raw("DataSourceType", this.DataSourceType)

        this.RedisHost = raw("RedisHost", this.RedisHost)
        this.RedisPort = java.lang.Integer.parseInt(raw("RedisPort", this.RedisPort))
    }


    /**
      * Segment Chinese text with jieba.
      *
      * The token list is re-joined with single spaces so the result can be
      * fed straight into `tokenize`, which splits on spaces.
      *
      * @param content the raw text to segment
      * @return space-separated segmented tokens
      */
    def jieba(content: String): String =
        StringUtils.join(jiebaOp.sentenceProcess(content), " ")

    /**
      * Segment text with the ANSJ dictionary analyzer and join the term names
      * with spaces (the result keeps the historical trailing space).
      *
      * Fix: the old implementation concatenated immutable Strings in a loop
      * (O(n²)); a StringBuilder makes it linear while producing byte-identical
      * output.
      *
      * @param content the raw text to segment
      * @return space-separated term names, with a trailing space when non-empty
      */
    def ansjCut(content:String): String = {
        val terms = DicAnalysis.parse(content).getTerms
        val sb = new StringBuilder
        var i = 0
        while (i < terms.size()) {
            sb.append(terms.get(i).getName).append(' ')
            i += 1
        }
        sb.toString
    }

    /**
      * Split space-separated text into cleaned tokens.
      *
      * Pipeline: lowercase + trim, keep only letter/digit/CJK tokens, drop
      * digit-bearing tokens unless `AllowNumber`, restrict to pure-Chinese
      * tokens when `ChineseEnable`, remove stop words and user stop words,
      * and finally keep only tokens longer than `LimitCount`.
      *
      * @param content space-separated text (e.g. output of `jieba`/`ansjCut`)
      * @return the surviving tokens, in input order
      */
    def tokenize(content: String): Seq[String] = {
        val normalized = content.split(" ").map(_.toLowerCase.trim)
        val base = normalized.filter(t => baseRegex.pattern.matcher(t).matches)
        val digitFiltered =
            if (AllowNumber) base
            else base.filter(t => regexNum.pattern.matcher(t).matches)
        val scoped =
            if (ChineseEnable) digitFiltered.filter(t => CHregex.pattern.matcher(t).matches)
            else digitFiltered
        scoped
            .filterNot(StopOtherWords.contains)
            .filterNot(UserStopWords.contains)
            .filter(_.length > LimitCount)
            .toSeq
    }

    /**
      * Screen tokens against the built-in list of banned advertising
      * superlatives (Chinese advertising-law "extreme" wording).
      *
      * @param content tokenized words to check
      * @return the subset of `content` that is a banned word, in input order
      */
    def isFanfa(content:Seq[String]):Seq[String] = {
        // The banned-word list is "、"-separated; a Set gives O(1) membership checks.
        val fanfaWords = "国家级、世界级、最高级、最佳、最大、第一、唯一、首个、首选、最好、最大、精确、顶级、最高、最低、最、最具、最便宜、最先进、最大程度、最新技术、最先进科学、国家级产品、填补国内空白、绝对、独家、首家、最先进、第一品牌、金牌、名牌、优秀、最先、顶级、独家、全网销量第一、全球首发、全国首家、全网首发、世界领先、顶级工艺、最新科学、最新技术、最先进加工工艺、最时尚、极品、顶级、顶尖、终极、最受欢迎、王牌、销量冠军、第一、NO.1、Top1、极致、永久、王牌、掌门人、领袖品牌、独一无二、独家、绝无仅有、前无古人、史无前例、万能"
        val banned = fanfaWords.split("、").toSet
        content.filter(banned.contains)
    }


    /**
      * Run Spark ML's `StopWordsRemover` over an RDD of token sequences.
      *
      * The tokens are wrapped into a single-column ("html") DataFrame, the
      * remover writes its output to a "words" column, and that column is
      * returned as a typed Dataset together with the `SQLContext` that owns it.
      *
      * NOTE(review): `setStopWords` is never called, so the remover uses its
      * built-in default stop-word list (English by default) — confirm this is
      * intended for Chinese input.
      *
      * @param sparkContext active SparkContext used to build the SQLContext
      * @param rddWords     tokenized documents, one Seq[String] per record
      * @return (Dataset of filtered token arrays, the SQLContext created here)
      */
    def makeStopWordsRemover(sparkContext: SparkContext,rddWords: RDD[Seq[String]]) = {
        val remover = new StopWordsRemover()
        // Build a SQLContext so the RDD can be lifted into a DataFrame.
        val sparkSql = new SQLContext(sparkContext)

        // Wrap each token sequence into a Row for the one-column schema below.
        val fields = Array(StructField("html", ArrayType(StringType),nullable = true))
        val rowRDD = rddWords.map(attributes => Row(attributes))
        val schema = StructType(fields)
        val rowDF = sparkSql.createDataFrame(rowRDD,schema).toDF()
        remover.setInputCol("html").setOutputCol("words")
        val stopWords = remover.transform(rowDF)
        //        stopWords.collect().foreach(println(_))
        // Encoder for Array[String] comes from the implicits import.
        import sparkSql.implicits._
        val words = stopWords.select("words").as[Array[String]]
        (words ,sparkSql)
    }

}
