package com.shujia.mllib

import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.sql.functions.{count, sum, when}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * Text-classification demo: segments raw text with the IK tokenizer,
  * builds TF-IDF features, trains a Naive Bayes classifier, reports
  * accuracy on a held-out split, and persists both fitted models.
  */
object Demo8TextClass {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .master("local[8]")
      .appName("person")
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    import spark.implicits._

    // Labelled training corpus: one tab-separated (label, text) pair per line.
    val texts: DataFrame = spark.read
      .format("csv")
      .schema("label DOUBLE, text STRING")
      .option("sep", "\t")
      .load("spark/data/train.txt")

    /**
      * Segment each document with the IK (Chinese) tokenizer.
      *
      * Rows whose text column is null are dropped first: the partial
      * pattern match below uses a `String` type pattern, which null
      * does NOT match, so a null row would throw a MatchError.
      */
    val ikDF: DataFrame = texts
      .filter($"text".isNotNull)
      .as[(Double, String)]
      .map {
        case (label: Double, text: String) =>
          // Segment the text, then re-join with single spaces so the
          // whitespace-based ML Tokenizer can re-split it downstream.
          val words: String = Demo9IK.fit(text).mkString(" ")

          (label, words)
      }
      .filter(_._2.nonEmpty) // drop documents that produced no tokens
      .toDF("label", "text")

    /**
      * Run the (whitespace) Tokenizer over the pre-segmented text.
      * This step is required: HashingTF expects an array-of-strings
      * column, which Tokenizer produces.
      */
    val tokenizer: Tokenizer = new Tokenizer()
      .setInputCol("text")
      .setOutputCol("words")

    val wordsData: DataFrame = tokenizer.transform(ikDF)

    /**
      * Term frequency via feature hashing.
      */
    val hashingTF: HashingTF = new HashingTF()
      .setInputCol("words")
      .setOutputCol("rawFeatures")

    val featurizedData: DataFrame = hashingTF.transform(wordsData)

    /**
      * Inverse document frequency: rescales raw term counts so that
      * terms common across the whole corpus carry less weight.
      */
    val idf: IDF = new IDF()
      .setInputCol("rawFeatures")
      .setOutputCol("features")

    // Fit the IDF model on the full featurized corpus.
    val idfModel: IDFModel = idf.fit(featurizedData)

    // Full TF-IDF dataset.
    val trainDF: DataFrame = idfModel.transform(featurizedData)

    /**
      * Split into training and test sets. A fixed seed makes the
      * reported accuracy reproducible across runs.
      */
    val split: Array[Dataset[Row]] = trainDF.randomSplit(Array(0.7, 0.3), seed = 42L)

    val train: Dataset[Row] = split(0)
    val test: Dataset[Row] = split(1)

    /**
      * Naive Bayes — a common choice for text classification.
      */
    val naiveBayes = new NaiveBayes()

    // Train the classifier.
    val model: NaiveBayesModel = naiveBayes.fit(train)

    /**
      * Evaluate on the held-out set: accuracy = correct / total.
      */
    val frame: DataFrame = model.transform(test)

    val result: DataFrame = frame.select(
      (sum(when($"label" === $"prediction", 1).otherwise(0)) / count($"label")).as("accuracy")
    )

    result.show()

    /**
      * Persist both fitted models. `overwrite()` keeps the demo
      * re-runnable: a plain save() fails if the path already exists.
      */
    idfModel.write.overwrite().save("spark/data/idfmodel")
    model.write.overwrite().save("spark/data/NaiveBayesModel")

    // Release the SparkSession and its resources.
    spark.stop()
  }

}
