package com.shujia.spark.mllib

import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object Demo_TextClass {

  /**
    * Text-classification demo.
    *
    * Pipeline: read a tab-separated (label, text) file, segment the text with
    * the IK analyzer, build TF-IDF features (HashingTF + IDF), train a Naive
    * Bayes classifier on an 80/20 split, persist the model and print accuracy
    * on the held-out 20%.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local[4]")
      .appName("text")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      // Tab-separated input: numeric label followed by the raw text.
      val textData: DataFrame = spark
        .read
        .format("csv")
        .option("sep", "\t")
        .schema("label DOUBLE,text STRING")
        .load("F:\\视频\\spark\\train.txt")

      val kvData: Dataset[(Double, String)] = textData.as[(Double, String)]

      // Segment each text with the IK analyzer (Chinese-aware tokenizer),
      // rejoin the tokens with single spaces, and drop rows that yielded
      // no tokens at all.
      val iKDF: Dataset[Row] = kvData.map {
        case (label: Double, text: String) =>
          // Word segmentation via the project-local IK helper.
          val words: List[String] = IK.fit(text)

          // Space-join so the ML Tokenizer below can split the tokens again.
          val str: String = words.mkString(" ")

          (label, str)
      }.toDF("label", "text")
        .where($"text" =!= "")

      // Split the space-joined token string back into an array column.
      val tokenizer: Tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")

      val tokenizerDF: DataFrame = tokenizer.transform(iKDF)

      // Term frequencies via the hashing trick.
      val hashingTF: HashingTF = new HashingTF()
        .setInputCol("words")
        .setOutputCol("rawFeatures")

      val hashingTFDF: DataFrame = hashingTF.transform(tokenizerDF)

      // Inverse document frequency: down-weight terms common to many documents.
      val idf: IDF = new IDF().setInputCol("rawFeatures").setOutputCol("features")

      val iDFModel: IDFModel = idf.fit(hashingTFDF)

      val idfDF: DataFrame = iDFModel.transform(hashingTFDF)

      // 80/20 train/test split (random, unseeded — results vary between runs).
      val Array(trainDF, testDF) = idfDF.randomSplit(Array(0.8, 0.2))

      /**
        * Naive Bayes is the usual baseline for text classification
        * (e.g. spam filtering).
        */
      val naiveBayes = new NaiveBayes()

      // Fit the model on the training split.
      val model: NaiveBayesModel = naiveBayes.fit(trainDF)

      // overwrite() so a re-run does not fail when the output path already exists.
      model.write.overwrite().save("data/textModel")

      // Score the held-out split with the trained model.
      val result: DataFrame = model.transform(testDF)

      // Accuracy = (# rows where prediction == label) / (total rows).
      result
        .select(sum(when($"label" === $"prediction", 1).otherwise(0)) / count($"label"))
        .show()
    } finally {
      // Always release the SparkSession, even if the job fails.
      spark.stop()
    }
  }

}
