package cn.doitedu.ml.comment.classify

import java.util

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.Term
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.feature.{HashingTF, IDF}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
 * Naive Bayes model trainer for the comment-classification sample data.
 *
 * Pipeline: load labeled comment samples (poor/general/good) -> segment each
 * comment with HanLP and drop stop words -> HashingTF term-frequency vectors
 * -> IDF re-weighting -> 80/20 train/test split -> fit NaiveBayes -> show
 * predictions on the held-out test set.
 */
object NaiveBayesTrain {
  def main(args: Array[String]): Unit = {

    val spark = SparkSession
      .builder()
      .appName("商品评论数据集的朴素贝叶斯分类模型训练")
      .master("local")
      .getOrCreate()
    import spark.implicits._

    // Load the raw sample sets (one comment per line).
    val general = spark.read.textFile("userprofile/data/comment_sample/general")
    val good = spark.read.textFile("userprofile/data/comment_sample/good")
    val poor = spark.read.textFile("userprofile/data/comment_sample/poor")

    // Attach class labels: poor = 0.0, general = 1.0, good = 2.0
    val labeledPoor = poor.map(s => (0.0, s))
    val labeledGeneral = general.map(s => (1.0, s))
    val labeledGood = good.map(s => (2.0, s))

    // Merge the three labeled sets into one sample set.
    val sample: Dataset[(Double, String)] = labeledPoor.union(labeledGeneral).union(labeledGood)

    // Load the stop-word dictionary and broadcast it so every task reads one
    // shared copy instead of serializing the set with each closure.
    val stopWords = spark.read.textFile("userprofile/data/comment_sample/stopwords").collect().toSet
    val bc = spark.sparkContext.broadcast(stopWords)

    // Segment each comment into words and filter out stop words.
    val wordsDS: DataFrame = sample.map(tp => {
      // Fetch the stop-word dictionary from the broadcast variable.
      val stpwds = bc.value

      val label = tp._1
      val terms: util.List[Term] = HanLP.segment(tp._2)
      // Explicit JavaConverters (.asScala) instead of the deprecated implicit
      // JavaConversions, which silently converts and is removed in Scala 2.13+.
      import scala.collection.JavaConverters._
      val words: Array[String] = terms.asScala.map(term => term.word).toArray.filter(!stpwds.contains(_))

      (label, words)
    }).toDF("label", "words")

    // Hash the word arrays into fixed-size term-frequency vectors.
    val hashingTF = new HashingTF()
      .setInputCol("words")
      .setOutputCol("tfvec")
      .setNumFeatures(1000000)

    val tfvecDF: DataFrame = hashingTF.transform(wordsDS)

    // Re-weight the TF vectors into TF-IDF vectors.
    // NOTE(review): IDF is fitted on the FULL sample before the train/test
    // split, so test-set document frequencies leak into the model. Consider
    // splitting first and fitting IDF on the training portion only.
    val idf = new IDF()
        .setInputCol("tfvec")
        .setOutputCol("tfidfvec")

    // Fit the IDF model (computes per-term document frequencies).
    val idfModel = idf.fit(tfvecDF)

    // Produce TF-IDF vectors and drop the intermediate columns.
    val tfidfVecDF: DataFrame = idfModel.transform(tfvecDF).drop("words", "tfvec")

    // Split into 80% training / 20% test. A fixed seed makes the split (and
    // therefore the trained model and evaluation) reproducible across runs.
    val Array(trainSet, testSet) = tfidfVecDF.randomSplit(Array(0.8, 0.2), seed = 42L)

    // Train the Naive Bayes classifier on the TF-IDF features.
    val naiveBayes = new NaiveBayes()
      .setLabelCol("label")
      .setFeaturesCol("tfidfvec")
      .setSmoothing(0.01)

    val model = naiveBayes.fit(trainSet)

    // model.save("userprofile/data/comment_sample/model")

    // val model1 = NaiveBayesModel.load("userprofile/data/comment_sample/model")

    // Predict on the held-out test set and show up to 100 rows untruncated.
    val result = model.transform(testSet).drop("tfidfvec")
    result.show(100, false)

    spark.close()
  }

}
