package com.fudian.spark_platform.Utils

import java.nio.file.Paths

import com.fudian.spark_platform.Configure.MillConfig
import com.huaban.analysis.jieba.{JiebaSegmenter, WordDictionary}
import org.ansj.library.DicLibrary
import org.ansj.splitWord.analysis.DicAnalysis
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.types._
import scala.collection.mutable.ArrayBuffer

import scala.io.Source

object AssertUtils extends Serializable {

    /**
      * Segments Chinese text with the jieba segmenter.
      *
      * @param content the string to segment
      * @return the segmented words joined by single spaces, so that a
      *         downstream tokenize() can split them back apart
      */
    def jieba(content: String): String = {
        val words = new JiebaSegmenter().sentenceProcess(content)
        // Join the java.util.List with spaces to return a single String.
        StringUtils.join(words, " ")
    }

    /**
      * Loads every line of the classpath resource "dic.txt" into the ANSJ
      * default user dictionary (category "userDefine", frequency 1000).
      *
      * NOTE(review): getResource returns null when the resource is missing,
      * which would surface here as a NullPointerException — confirm "dic.txt"
      * is always packaged.
      */
    def loadANSJDict(): Unit = {
        val source = Source.fromURL(this.getClass.getClassLoader.getResource("dic.txt"))
        try {
            // fix: the Source was previously never closed (file-handle leak)
            source.getLines().foreach { line =>
                DicLibrary.insert(DicLibrary.DEFAULT, line, "userDefine", 1000)
            }
        } finally {
            source.close()
        }
    }

}

class MLUtils(m: MillConfig) extends Serializable {

    // Feature switches and thresholds read from the milling configuration.
    // Kept as `var`s to preserve the original public getters/setters.
    var AllowNumber: Boolean = m.millingConfig("AllowNumber").asInstanceOf[Boolean]
    var ChineseEnable: Boolean = m.millingConfig("ZHAble").asInstanceOf[Boolean]
    var LimitCount: Int = m.millingConfig("LimitCount").asInstanceOf[Int]
    var FastStopWords: Array[String] = m.millingConfig("FastStopWords").toString.split(" ")

    // Stop-word lists used to drop illegal and stop words picked up while crawling.
    var ZHStopWords: Array[String] = m.millingConfig("ZHStopWords").asInstanceOf[Array[String]]

    var ENStopWords: Array[String] = m.millingConfig("ENStopWords").asInstanceOf[Array[String]]

    var UserStopWords: Array[String] = m.millingConfig("USStopWords").asInstanceOf[Array[String]]


    // Matches tokens that contain no digits (used to filter out pure-number content).
    val regexNum = "[^0-9]*".r

    // Matches tokens made up exclusively of CJK unified ideographs (filters non-Chinese).
    val CHregex = "[\\u4e00-\\u9fa5]*".r

    // Base filter: ASCII letters, digits and CJK ideographs only.
    val baseRegex = "[a-zA-Z0-9\\u4e00-\\u9fa5]*".r

    // Banned "superlative" advertising words (China advertising-law list); hoisted
    // to a private Set so isFanfa does not re-split this constant on every call.
    private val fanfaWordSet: Set[String] =
        "国家级、世界级、最高级、最佳、最大、第一、唯一、首个、首选、最好、最大、精确、顶级、最高、最低、最、最具、最便宜、最先进、最大程度、最新技术、最先进科学、国家级产品、填补国内空白、绝对、独家、首家、最先进、第一品牌、金牌、名牌、优秀、最先、顶级、独家、全网销量第一、全球首发、全国首家、全网首发、世界领先、顶级工艺、最新科学、最新技术、最先进加工工艺、最时尚、极品、顶级、顶尖、终极、最受欢迎、王牌、销量冠军、第一、NO.1、Top1、极致、永久、王牌、掌门人、领袖品牌、独一无二、独家、绝无仅有、前无古人、史无前例、万能"
            .split("、").toSet

    /**
      * Loads a user-defined jieba dictionary from the classpath.
      *
      * @param pathStr classpath resource name; falls back to "dic.txt" when empty
      */
    def loadDict(pathStr: String): Unit = {
        // fix: both branches differed only in the resource name — folded together.
        val resourceName = if (pathStr.length > 0) pathStr else "dic.txt"
        // NOTE(review): getResource().getPath does not resolve inside a packaged
        // jar and returns null for a missing resource — confirm deployment layout.
        val path = Paths.get(this.getClass.getClassLoader.getResource(resourceName).getPath)
        WordDictionary.getInstance().loadUserDict(path)
    }

    /**
      * Loads every line of the classpath resource "dic.txt" into the ANSJ
      * default user dictionary (category "userDefine", frequency 1000).
      */
    def loadANSJDict(): Unit = {
        val source = Source.fromURL(this.getClass.getClassLoader.getResource("dic.txt"))
        try {
            // fix: the Source was previously never closed (file-handle leak)
            source.getLines().foreach { line =>
                DicLibrary.insert(DicLibrary.DEFAULT, line, "userDefine", 1000)
            }
        } finally {
            source.close()
        }
    }


    /**
      * Segments `content` with ANSJ dictionary-based analysis and joins the
      * resulting term names with single spaces (keeps the original trailing
      * space for backward compatibility).
      */
    def ansjCut(content: String): String = {
        val res = DicAnalysis.parse(content)
        val termCount = res.getTerms.size()
        // fix: StringBuilder instead of accidental O(n^2) string concatenation
        val sb = new StringBuilder
        var i = 0
        while (i < termCount) {
            sb.append(res.get(i).getName).append(' ')
            i += 1
        }
        sb.toString
    }

    /**
      * Tokenizes pre-segmented, space-separated text and drops tokens that are
      * clearly unsuitable: anything outside [a-zA-Z0-9 + CJK], pure numbers
      * (unless AllowNumber), non-Chinese tokens (when ChineseEnable), all four
      * stop-word lists, and tokens shorter than LimitCount.
      *
      * @param content space-separated tokens
      * @return the surviving tokens, lower-cased and trimmed
      */
    def tokenize(content: String): Seq[String] = {
        var tokens = content.split(" ")
            .map(_.toLowerCase)
            .map(_.trim())
            .filter(this.baseRegex.pattern.matcher(_).matches)
        if (!this.AllowNumber) {
            tokens = tokens.filter(this.regexNum.pattern.matcher(_).matches)
        }
        if (this.ChineseEnable) {
            tokens = tokens.filter(this.CHregex.pattern.matcher(_).matches)
        }

        tokens.filterNot(this.UserStopWords.contains(_))
            .filterNot(this.ZHStopWords.contains(_))
            .filterNot(this.ENStopWords.contains(_))
            .filterNot(this.FastStopWords.contains(_))
            .filter(_.length >= this.LimitCount)
            .toSeq
    }

    /**
      * Standard Chinese stop-word removal: drops user, Chinese and fast stop
      * words plus tokens shorter than LimitCount.
      *
      * @param words candidate tokens
      * @return tokens surviving stop-word and length filtering
      */
    def normalZhStop(words: Seq[String]): Seq[String] = {
        words.filterNot(this.UserStopWords.contains(_))
            .filterNot(this.ZHStopWords.contains(_))
            .filterNot(this.FastStopWords.contains(_))
            .filter(_.length >= this.LimitCount)
    }

    /**
      * Standard English stop-word removal: drops user, English and fast stop
      * words plus tokens shorter than LimitCount.
      *
      * @param words candidate tokens
      * @return tokens surviving stop-word and length filtering
      */
    def normalENStop(words: Seq[String]): Seq[String] = {
        words.filterNot(this.UserStopWords.contains(_))
            .filterNot(this.ENStopWords.contains(_))
            .filterNot(this.FastStopWords.contains(_))
            // fix: was `> LimitCount`, inconsistent with tokenize/normalZhStop (`>=`)
            .filter(_.length >= this.LimitCount)
    }


    /**
      * Returns the words of `content` that appear on the banned advertising-law
      * ("fanfa") superlative word list, preserving their original order.
      */
    def isFanfa(content: Seq[String]): Seq[String] =
        content.filter(fanfaWordSet.contains(_))


    /**
      * Returns true when `str` contains at least one Chinese character, i.e.
      * keeps data that is plausibly Chinese and drops everything else.
      */
    def isFilter(str: String): Boolean =
        this.CHregex.findAllMatchIn(str).mkString.nonEmpty

    /**
      * Builds a Spark StructType from a space-separated field spec of the form
      * "name1:Type1 name2:Type2 ...".
      *
      * @param structString field spec; each entry is "fieldName:typeName"
      * @return a schema with one nullable StructField per entry
      */
    def getResultsDataStructType(structString: String): StructType = {
        // fix: removed an unused ArrayBuffer[StructField] that was never written to
        val fields = structString.split(" ").map { field =>
            val nameAndType = field.split(":")
            StructField(nameAndType(0), getStringTypeToDataStructType(nameAndType(1)), nullable = true)
        }
        StructType(fields)
    }

    /**
      * Maps a type-name string to the corresponding Spark SQL DataType;
      * unknown names default to StringType.
      *
      * @param typeString one of "String", "Int", "Array", "Double", "Float"
      */
    def getStringTypeToDataStructType(typeString: String): DataType = typeString match {
        case "String" => StringType
        case "Int"    => IntegerType
        case "Array"  => ArrayType(StringType, containsNull = true)
        case "Double" => DoubleType
        case "Float"  => FloatType
        case _        => StringType
    }

}
