package com.shujia.spark.mllib

import com.shujia.spark.util.IK
import org.apache.spark.SparkContext
import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import scala.util.Try

/**
 * Text-classification demo: tokenizes comments with the IK segmenter,
 * builds TF-IDF features and trains a logistic-regression model,
 * then reports accuracy on a held-out test split.
 *
 * Expected input: `data/comment.txt`, CSV-ish lines where field 3 (index 2)
 * is a numeric label and field 4 (index 3) is the comment text. The text
 * itself may contain commas, so the split is limited to 4 fields.
 */
object Demo7TextClass {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[8]")
      .appName("image")
      .getOrCreate()
    import spark.implicits._

    val sc: SparkContext = spark.sparkContext

    // 1. Read the raw data.
    val linesRDD: RDD[String] = sc.textFile("data/comment.txt")

    // Parse each line into (label, text).
    // split(",", 4) keeps commas inside the comment text intact — a plain
    // split(",") would silently truncate the text at its first comma.
    // Malformed lines (too few fields, non-numeric label) are skipped
    // instead of failing the whole job.
    val commentRDD: RDD[(Double, String)] = linesRDD
      .map(_.split(",", 4))
      .filter(_.length >= 4)
      .flatMap { fields =>
        Try(fields(2).toDouble).toOption.map(label => (label, fields(3)))
      }

    // Convert to a DataFrame.
    val commentDF: DataFrame = commentRDD.toDF("label", "text")

    // 2. Tokenize with the IK segmenter; drop rows that yield no tokens.
    val wordsDF: DataFrame = commentDF
      .select($"label", $"text")
      .map { case Row(label: Double, text: String) => (label, IK.segment(text)) }
      .filter(_._2.nonEmpty)
      .toDF("label", "words")

    wordsDF.printSchema()

    // 3. TF-IDF: weighs each term by its importance to the document.
    val hashingTF: HashingTF = new HashingTF()
      .setInputCol("words")
      .setOutputCol("rawFeatures")
      .setNumFeatures(10000)

    val tfDF: DataFrame = hashingTF.transform(wordsDF)

    val idf: IDF = new IDF()
      .setInputCol("rawFeatures")
      .setOutputCol("features")

    // Fit the IDF model on the term-frequency vectors.
    val idfModel: IDFModel = idf.fit(tfDF)
    val data: DataFrame = idfModel.transform(tfDF)

    // Cache: this DataFrame is re-evaluated by randomSplit, fit and transform.
    val resultDF: DataFrame = data.select($"label", $"features").cache()

    // 4. Split into train/test sets. A fixed seed makes the reported
    // accuracy reproducible across runs.
    val Array(train, test) = resultDF.randomSplit(Array(0.8, 0.2), seed = 42L)

    // 5. Train the model on the training set.
    val regression = new LogisticRegression()
    val model: LogisticRegressionModel = regression.fit(train)

    // 6. Score the test set.
    val testDF: DataFrame = model.transform(test)
    testDF.show(false)

    // 7. Accuracy: count the test rows once and guard against an empty
    // test split (the original divided unconditionally, risking NaN).
    val total: Long = testDF.count()
    val correct: Long = testDF.where($"label" === $"prediction").count()
    val p: Double = if (total == 0L) 0.0 else correct.toDouble / total * 100
    println(s"准确率：$p")

    spark.stop()
  }
}
