package com.etc

import java.util.Random

import org.apache.spark.SparkConf
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.{HashingTF, IDF}
import org.apache.spark.sql.SparkSession

/**
  * 情感分类
  * 1.数据预处理
  * 2.文本特征提取
  * 3.训练分类模型
  *
  */
/**
  * Sentiment classification pipeline:
  *   1. Data preprocessing (tokenize, label, shuffle)
  *   2. Text feature extraction (TF-IDF)
  *   3. Training and evaluating a classification model
  */
object Word {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("SA")
    val spark = SparkSession.builder().config(conf).getOrCreate()

    import spark.implicits._

    val rand = new Random()

    // Read a labelled corpus: one document per line, whitespace-tokenized.
    // `value` is the sentiment label (0 = negative, 1 = positive) and
    // `random` exists only to shuffle the two classes together below.
    // NOTE: split(" ") yields empty strings ("") for consecutive delimiters,
    // never a single-space token, so drop empty tokens — the original
    // `!_.equals(" ")` filter matched nothing.
    def readCorpus(path: String, label: Int) =
      spark.read.textFile(path).map { line =>
        (line.split(" ").filter(_.nonEmpty), label, rand.nextDouble())
      }.toDF("words", "value", "random")

    val neg = readCorpus("neg.txt", 0)
    val pos = readCorpus("pos.txt", 1)

    // Interleave both classes in random order so downstream splits are unbiased.
    val data = neg.union(pos).sort("random")

    // Raw term frequencies via the hashing trick.
    val hashed = new HashingTF()
      .setInputCol("words")
      .setOutputCol("hashing")
      .transform(data)

    // Re-weight term frequencies by inverse document frequency.
    val idfModel = new IDF()
      .setInputCol("hashing")
      .setOutputCol("tfidf")
      .fit(hashed)

    val transformedData = idfModel.transform(hashed)

    val Array(train, test) = transformedData.randomSplit(Array(0.7, 0.3))

    // Binary classification on the extracted TF-IDF features;
    // the classifier itself is interchangeable.
    val bayes = new NaiveBayes()
      .setFeaturesCol("tfidf") // X
      .setLabelCol("value")    // y
      .fit(train)
    val result = bayes.transform(test) // predict on the held-out split
    result.show(false)

    // Evaluate the model's accuracy on the test set.
    val evaluator = new MulticlassClassificationEvaluator()
      .setLabelCol("value")
      .setPredictionCol("prediction")
      .setMetricName("accuracy")
    val accuracy = evaluator.evaluate(result)
    println(s"""accuracy is $accuracy""")

    // Release the resources held by the local Spark session.
    spark.stop()
  }
}
