package cn.doitedu.ml.comment

import java.net.URLDecoder
import java.util

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.Term
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.feature.{HashingTF, IDF}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.storage.StorageLevel

import scala.collection.mutable

// NOTE(review): "Trainner" is a misspelling of "Trainer"; the name is kept
// unchanged because external callers/scripts may reference it.
object CommentClassifyTrainner {

  /**
   * Trains a Naive Bayes sentiment classifier over product-review comments.
   *
   * Pipeline: load labeled comment sets (good=0.0, general=1.0, poor=2.0)
   * -> HanLP Chinese word segmentation -> stopword removal -> HashingTF
   * -> IDF -> NaiveBayes fit on a 60/40 random train/test split, then
   * saves the model and prints a rough accuracy on the test split.
   *
   * @param args optional overrides: args(0) = data root directory
   *             (default "user_portrait/data/comment"),
   *             args(1) = model output path (default "&lt;dataRoot&gt;/model").
   */
  def main(args: Array[String]): Unit = {

    // Allow the data root and model path to be supplied on the command line;
    // the defaults preserve the original hard-coded layout.
    val dataRoot  = if (args.length > 0) args(0) else "user_portrait/data/comment"
    val modelPath = if (args.length > 1) args(1) else s"$dataRoot/model"

    val spark = SparkSession.builder().appName("评论语义分析").master("local").getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Load the three labeled sample sets: good=0.0, general=1.0, poor=2.0.
    val good    = spark.read.textFile(s"$dataRoot/good").selectExpr("value as cmt", "cast(0.0 as double) as label")
    val general = spark.read.textFile(s"$dataRoot/general").selectExpr("value as cmt", "cast(1.0 as double) as label")
    val poor    = spark.read.textFile(s"$dataRoot/poor").selectExpr("value as cmt", "cast(2.0 as double) as label")

    // Merge the three classes into one sample set.
    val sample = good.union(general).union(poor)

    /**
     * Feature engineering
     */
    // Broadcast the stopword set so every executor filters with a local copy
    // instead of serializing the set with each task closure.
    val stopwordsSet = spark.sparkContext.textFile(s"$dataRoot/stopwords").collect().toSet
    val bc = spark.sparkContext.broadcast(stopwordsSet)

    // Chinese word segmentation via HanLP. Guards against null comment rows
    // (HanLP.segment would NPE) and drops stopwords in the same pass — the
    // original did the filtering in a separate DataFrame->RDD->DataFrame
    // round trip. Uses explicit JavaConverters instead of the deprecated
    // implicit JavaConversions (removed in Scala 2.13).
    val fenci: UserDefinedFunction = udf((cmt: String) => {
      if (cmt == null) Seq.empty[String]
      else {
        import scala.collection.JavaConverters._
        val stopwords = bc.value
        HanLP.segment(cmt).asScala
          .map(_.word)
          .filterNot(stopwords.contains)
      }
    })

    val wordsSample = sample.select(fenci('cmt) as "words", 'label)

    // Vectorize the word arrays: term frequencies hashed into a fixed space.
    val hashingTF = new HashingTF()
      .setInputCol("words")
      .setNumFeatures(1000000)
      .setOutputCol("tfvec")

    val tfvec = hashingTF.transform(wordsSample)
    // Reused by both IDF fitting and transformation, so cache it.
    tfvec.persist(StorageLevel.MEMORY_AND_DISK_SER)

    val idf = new IDF()
      .setInputCol("tfvec")
      .setOutputCol("tfidfvec")
    val idfModel = idf.fit(tfvec)

    val tfidf: DataFrame = idfModel.transform(tfvec).drop("words", "tfvec")

    /**
     * Train the Naive Bayes model
     */
    val naiveBayes = new NaiveBayes()
      .setFeaturesCol("tfidfvec")
      .setSmoothing(0.01)
      .setLabelCol("label")
      .setPredictionCol("prediction")

    // Split the samples into a training set (60%) and a test set (40%).
    val Array(train, test) = tfidf.randomSplit(Array(0.6, 0.4))

    // Fit the model on the training set.
    val bayesModel = naiveBayes.fit(train)

    // overwrite() lets the job be re-run without failing on an existing
    // output directory (the plain .save used before threw in that case).
    bayesModel.write.overwrite().save(modelPath)

    // Use the trained model to predict the held-out test set.
    val predict = bayesModel.transform(test).drop("tfidfvec")
    predict.show(100, false)

    // Rough accuracy: fraction of rows where prediction equals the label.
    // Guard against an empty test split to avoid a division by zero.
    val total = predict.count()
    if (total > 0) {
      predict.selectExpr(s"count(if(label=prediction,1,null))/${total} as accuracy").show(100, false)
    }

    spark.close()
  }
}
