package com.shujia.spark.mllib

import com.shujia.spark.util.IKUtil
import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Text-classification demo: Chinese text → IK segmentation → TF-IDF features
 * → Naive Bayes classifier, with accuracy reported on a held-out test split.
 *
 * Input file `data/text.txt`: tab-separated lines of `label<TAB>text`.
 */
object Demo8TextClass {

  def main(args: Array[String]): Unit = {

    // Fixed: appName previously said "Demo5ImageData" (copy-paste from another demo).
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[6]")
      .appName("Demo8TextClass")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      // Read raw data: one labeled document per line, tab separated.
      val textDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", "\t")
        .schema("label DOUBLE,text STRING")
        .load("data/text.txt")

      // Chinese word segmentation via the IK analyzer. The tokens are re-joined
      // with spaces so the standard whitespace Tokenizer below can split them.
      // Robustness fix: guard against null text cells from the CSV reader,
      // which would otherwise throw an NPE inside the UDF.
      val ikUDF: UserDefinedFunction = udf((text: String) => {
        if (text == null) "" else IKUtil.fit(text).mkString(" ")
      })

      val dataDF: DataFrame = textDF
        .select($"label", ikUDF($"text") as "text")
        // Drop rows that produced no tokens (dirty/empty input).
        .where($"text" =!= "")

      // Whitespace tokenizer: turns the space-joined string into a words array.
      val tokenizer: Tokenizer = new Tokenizer()
        .setInputCol("text")
        .setOutputCol("words")
      val wordsData: DataFrame = tokenizer.transform(dataDF)

      /**
       * TF-IDF: weighs each term by how informative it is for a document.
       */

      // Term frequencies hashed into a fixed-size feature vector.
      val hashingTF: HashingTF = new HashingTF()
        .setInputCol("words")
        .setOutputCol("rawFeatures")
        .setNumFeatures(10000)
      val hashingTFDF: DataFrame = hashingTF.transform(wordsData)

      // Inverse document frequency, fitted on the whole corpus.
      val idf: IDF = new IDF()
        .setInputCol("rawFeatures")
        .setOutputCol("features")
      val idfModel: IDFModel = idf.fit(hashingTFDF)
      val rescaledData: DataFrame = idfModel.transform(hashingTFDF)

      // Split into train/test sets. Fixed seed so the split (and the reported
      // accuracy) is reproducible between runs of this demo.
      val Array(train: DataFrame, test: DataFrame) =
        rescaledData.randomSplit(Array(0.8, 0.2), seed = 42L)

      /**
       * Train the model. Naive Bayes is a standard baseline for text classification.
       */
      val naiveBayes = new NaiveBayes()
      val model: NaiveBayesModel = naiveBayes.fit(train)

      // Evaluate accuracy on the held-out test set.
      val testDF: DataFrame = model.transform(test)
      val total: Long = testDF.count()
      // Robustness fix: the original divided by testDF.count() unconditionally,
      // yielding NaN when the random split leaves the test set empty
      // (possible with very small input files).
      val p: Double =
        if (total == 0L) 0.0
        else testDF.where($"label" === $"prediction").count().toDouble / total

      println(s"模型的准确率：$p")
    } finally {
      // Fixed: the original never released the SparkSession.
      spark.stop()
    }
  }

}
