package MLlib

import com.hankcs.hanlp.HanLP
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import util.SparkUtil
import java.util

import com.hankcs.hanlp.seg.common.Term
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel}
import org.apache.spark.rdd.RDD

object CommentClassify {

  /**
   * Trains a Naive Bayes sentiment classifier for Chinese product comments.
   *
   * Pipeline: load labeled comments (good=2.0, general=1.0, poor=0.0) →
   * segment with HanLP and drop stop words → HashingTF term-frequency
   * vectors → IDF weighting → NaiveBayes fit on an 80/20 train/test split →
   * print test-set accuracy.
   *
   * NOTE(review): input paths are hard-coded (local Windows paths and a
   * relative stop-word file) — parameterize before running elsewhere.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Load the raw comment corpora, one text file per sentiment class.
    val good: Dataset[String] = spark.read.textFile("E:\\doit项目资料\\13期项目资料\\day18\\comment\\good")
    val general: Dataset[String] = spark.read.textFile("E:\\doit项目资料\\13期项目资料\\day18\\comment\\general")
    val poor: Dataset[String] = spark.read.textFile("E:\\doit项目资料\\13期项目资料\\day18\\comment\\poor")

    // Attach a numeric label per class (NaiveBayes requires a Double label column).
    val goodDF: DataFrame = good.selectExpr("value as comment", "cast(2.0 as double) as label")
    val generalDF: DataFrame = general.selectExpr("value as comment", "cast(1.0 as double) as label")
    val poorDF: DataFrame = poor.selectExpr("value as comment", "cast(0.0 as double) as label")

    // Merge the three labeled sets into one sample set.
    val sample: Dataset[Row] = goodDF.union(generalDF).union(poorDF)

    // Load stop words and broadcast them so every executor filters with the same set.
    val SW: Dataset[String] = spark.read.textFile("userprofile/data/Demo/stopWords.txt")
    val set: Set[String] = SW.rdd.collect().toSet
    val bc: Broadcast[Set[String]] = spark.sparkContext.broadcast(set)

    // Segment each Chinese comment into words and filter out stop words.
    val wordsSample: DataFrame = sample.map(row => {
      val stopWords: Set[String] = bc.value
      val comment: String = row.getAs[String]("comment")
      val label: Double = row.getAs[Double]("label")
      // HanLP returns a java.util.List; convert explicitly with asScala.
      // (JavaConverters replaces the deprecated implicit JavaConversions.)
      val terms: util.List[Term] = HanLP.segment(comment)
      import scala.collection.JavaConverters._
      val words: Array[String] =
        terms.asScala.map(_.word).toArray.filter(w => !stopWords.contains(w))
      (words, label)
    }).toDF("words", "label")
    wordsSample.show(100, truncate = false)

    // Vectorize the word arrays: hash each word into a fixed-size TF vector.
    val tf: HashingTF = new HashingTF()
      .setInputCol("words")       // column holding the word arrays
      .setNumFeatures(100000)     // dimensionality of the hashed feature space
      .setOutputCol("tf_vec")     // output column for the raw TF vectors
    val tfVecs: DataFrame = tf.transform(wordsSample)

    // Re-weight the TF vectors with IDF to down-weight ubiquitous terms.
    val idf: IDF = new IDF()
      .setInputCol("tf_vec")
      .setOutputCol("tf_idf_vec")
    val iDFModel: IDFModel = idf.fit(tfVecs)
    val TFIDFVecs: DataFrame = iDFModel.transform(tfVecs)
    TFIDFVecs.show(100, truncate = false)

    // Hold out 20% of the samples for evaluation; train on the remaining 80%.
    val Array(train, test) = TFIDFVecs.randomSplit(Array(0.8, 0.2))
    val bayes: NaiveBayes = new NaiveBayes()
      .setFeaturesCol("tf_idf_vec")
      .setLabelCol("label")
      .setSmoothing(1.0)           // Laplace smoothing for unseen terms
    val bayesModel: NaiveBayesModel = bayes.fit(train)
    //    bayesModel.save("...")
    val predict: DataFrame = bayesModel.transform(test)
    //    predict.show(100, truncate = false)

    // Evaluate: fraction of test rows whose predicted label matches the truth.
    // BUG FIX: the original computed (correct / total).toDouble, which does
    // integer division first and always printed 0.0 (or 1.0). Convert to
    // Double BEFORE dividing, and guard against an empty test split.
    val total: Long = predict.count()
    val correct: Long = predict.where("label=prediction").count()
    val accuracy: Double = if (total == 0) 0.0 else correct.toDouble / total
    println("准确率为： " + accuracy)

    spark.close()
  }
}
