package com.shujia.mllib

import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * Text classification demo: tokenize Chinese text with the IK analyzer,
  * vectorize with TF-IDF (HashingTF + IDF), train a Naive Bayes classifier,
  * print its accuracy on a held-out split, and persist both the IDF model
  * and the classifier.
  */
object Demo9TextClass {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      // was "kmeans" — copy-paste from another demo; label the job correctly
      .appName("naive-bayes-text-class")
      .master("local[8]")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      // 1. Read raw training data: tab-separated "label \t text"
      val data: DataFrame = spark
        .read
        .format("csv")
        .option("sep", "\t")
        .schema("label DOUBLE, text STRING")
        .load("spark/data/train.txt")
        .repartition(8)

      // 2. Segment each document into words with the IK Chinese tokenizer
      val wordsDF: Dataset[(Double, List[String])] = data.as[(Double, String)]
        .map { case (label, text) =>
          val words: List[String] = Demo8IK.fit(text)
          (label, words)
        }

      // 3. Drop documents that are too short to carry a useful signal
      val filterDF: Dataset[(Double, List[String])] = wordsDF.filter(_._2.length > 2)

      // 4. Re-join the segmented words with spaces so the whitespace-based
      //    ML Tokenizer can re-split them into the `words` array column
      val linesDF: DataFrame = filterDF.map { case (label, words) =>
        (label, words.mkString(" "))
      }.toDF("label", "text")

      // Whitespace tokenizer (splits the pre-segmented text back into tokens)
      val tokenizer: Tokenizer = new Tokenizer()
        .setInputCol("text")
        .setOutputCol("words")

      val wordsData: DataFrame = tokenizer.transform(linesDF)

      // 5. Term frequency via feature hashing
      val hashingTF: HashingTF = new HashingTF()
        .setInputCol("words")
        .setOutputCol("rawFeatures")

      val featurizedData: DataFrame = hashingTF.transform(wordsData)

      // 6. Inverse document frequency — fit on the corpus, then rescale TF
      val idf: IDF = new IDF()
        .setInputCol("rawFeatures")
        .setOutputCol("features")

      val idfModel: IDFModel = idf.fit(featurizedData)

      val rescaledData: DataFrame = idfModel.transform(featurizedData)

      // 7. Random 70/30 train/test split
      val split: Array[Dataset[Row]] = rescaledData.randomSplit(Array(0.7, 0.3))

      val train: Dataset[Row] = split(0)
      val test: Dataset[Row] = split(1)

      // 8. Naive Bayes — a good fit for sparse bag-of-words text features
      val naiveBayes = new NaiveBayes()

      // Fit the classifier on the training split
      val model: NaiveBayesModel = naiveBayes.fit(train)

      // Score the test split to estimate generalization accuracy
      val dataFrame: DataFrame = model.transform(test)

      // Accuracy = (# rows where label == prediction) / (# rows)
      val p: DataFrame = dataFrame.select(
        sum(when($"label" === $"prediction", 1).otherwise(0)) / count($"label") as "p"
      )

      p.show()

      // 9. Persist both models; overwrite so re-running the job does not
      //    fail with "path already exists"
      idfModel.write.overwrite().save("spark/data/idfModel")
      model.write.overwrite().save("spark/data/naiveBayes")
    } finally {
      // Release the local Spark context even if the job fails
      spark.stop()
    }
  }
}
