package com.fwmagic.spark.ml.tfidf

import java.util

import com.fwmagic.spark.util.SparkUtils
import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.Term
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Trains a Naive Bayes text classifier over Chinese product-review comments
 * labelled good / neutral / poor. The labelled sample is split 80/20 into a
 * training set and a held-out test set; the model's accuracy on the test set
 * is printed at the end.
 *
 * Pipeline: HanLP word segmentation -> stop-word filtering ->
 * HashingTF term-frequency vectors -> IDF rescaling -> NaiveBayes.
 */
object CommentClassifyModelTrain {

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtils.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._ // needed for Dataset.map tuple encoder and toDF

    // Label each source file: 0.0 = good, 1.0 = neutral, 2.0 = poor reviews.
    val good: DataFrame = spark.read.textFile("data/tfidf/good.txt")
      .selectExpr("value as comment", "cast(0.0 as double) as label")

    val general: DataFrame = spark.read.textFile("data/tfidf/general.txt")
      .selectExpr("value as comment", "cast(1.0 as double) as label")

    val poor: DataFrame = spark.read.textFile("data/tfidf/poor.txt")
      .selectExpr("value as comment", "cast(2.0 as double) as label")

    // Union the three labelled sets into one sample set.
    val sample: Dataset[Row] = good.union(general).union(poor)

    sample.show(10, false)

    // Collect the stop-word list on the driver, then broadcast it so every
    // executor shares one read-only copy instead of serializing it per task.
    val stopWordsSet: Set[String] =
      spark.read.textFile("data/tfidf/stopwords/stopwords.txt").collect().toSet

    val bc: Broadcast[Set[String]] = spark.sparkContext.broadcast(stopWordsSet)

    // Segment each Chinese comment into words and drop stop words.
    val wordsSample: DataFrame = sample.map(row => {
      val stopWords: Set[String] = bc.value

      val label: Double = row.getAs[Double]("label")
      val comment: String = row.getAs[String]("comment")
      // HanLP performs the Chinese word segmentation.
      val terms: util.List[Term] = HanLP.segment(comment)
      // Explicit .asScala via JavaConverters replaces the deprecated
      // implicit scala.collection.JavaConversions.
      import scala.collection.JavaConverters._
      val words: Array[String] =
        terms.asScala.map(term => term.word).toArray.filter(word => !stopWords.contains(word) /*&& word.length > 1*/)
      (words, label)
    }).toDF("words", "label")

    wordsSample.show(100, false)

    // Hash each word array into a sparse term-frequency vector.
    val tf: HashingTF = new HashingTF()
      .setInputCol("words")     // input: segmented word arrays
      .setNumFeatures(100000)   // size of the hashing feature space
      .setOutputCol("tf_vec")   // output: raw TF vectors

    val tfVecs: DataFrame = tf.transform(wordsSample)

    tfVecs.show(30, false)

    // Rescale TF vectors to TF-IDF, down-weighting corpus-wide common terms.
    val idf: IDF = new IDF()
      .setInputCol("tf_vec")
      .setOutputCol("tf_idf_vec")

    // Fit the IDF model on the full TF vector set.
    val idfModel: IDFModel = idf.fit(tfVecs)

    val tfidfVecs: DataFrame = idfModel.transform(tfVecs)

    tfidfVecs.show(30, false)

    // Hold out 20% of the sample for evaluation; train on the remaining 80%.
    // A fixed seed makes the split — and the reported accuracy — reproducible
    // across runs (the original unseeded split varied every execution).
    val Array(train, test) = tfidfVecs.randomSplit(Array[Double](0.8, 0.2), seed = 42L)

    // Multinomial Naive Bayes over the TF-IDF features.
    val bayes: NaiveBayes = new NaiveBayes()
      .setLabelCol("label")          // label column
      .setFeaturesCol("tf_idf_vec")  // feature-vector column
      .setSmoothing(1.0)             // Laplace smoothing
      //.setModelType("multinomial") // already the default; original comment had typo "multionmial"

    // Fit the model (learns the per-class conditional probability tables).
    val bayesModel: NaiveBayesModel = bayes.fit(train)

    // Predict labels for the held-out test set.
    val predictResult: DataFrame = bayesModel.transform(test)

    predictResult.show(100, false)

    // Accuracy = correctly predicted rows / total test rows.
    val total: Long = predictResult.count()
    val correct: Long = predictResult.where("label=prediction").count()

    println(s"===> correct:${correct}")
    println(s"===> total:${total}")

    // Guard against an empty test split, which would otherwise yield NaN.
    val res: Double = if (total == 0L) 0.0 else correct / total.toDouble

    println(s"===> res:${res}")

    spark.close()
  }

}
