package cn.doitedu.ml.comment

import java.util

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.Term
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.evaluation.{BinaryClassificationEvaluator, MulticlassClassificationEvaluator}
import org.apache.spark.ml.feature.{HashingTF, IDF}
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.sql.{Dataset, Row, SparkSession}

import scala.collection.JavaConverters._
import scala.collection.mutable

object BayesModelTrainner {

  /**
   * Trains a 3-class (good=0 / general=1 / poor=2) Naive Bayes comment-sentiment
   * model from labeled text samples:
   *
   *   1. load and label the three sample sets
   *   2. tokenize each comment with HanLP and drop stop words
   *   3. vectorize with hashing TF, then re-weight to TF-IDF
   *   4. fit a NaiveBayes model on a 70/30 train/test split and persist it
   *   5. report correct/total counts, a confusion matrix and overall accuracy
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .config("spark.sql.shuffle.partitions", "10")
      .appName("comment-bayes-model-trainer")
      .master("local[*]")
      .getOrCreate()

    import spark.implicits._

    // Load the three labeled sample sets; label is the double class index NaiveBayes expects
    val good = loadSample(spark, "d:/comment_sample/good", 0)
    val general = loadSample(spark, "d:/comment_sample/general", 1)
    val poor = loadSample(spark, "d:/comment_sample/poor", 2)

    // Merge the three classes into one sample set
    val sample = good.union(general).union(poor)

    // Load the stop-word dictionary once on the driver and broadcast it
    // so every executor shares a single read-only copy
    val stopwords = spark.read.textFile("D:\\comment_sample\\stopwords").collect().toSet
    val bc = spark.sparkContext.broadcast(stopwords)

    // Tokenize every comment with HanLP (common Chinese tokenizers include
    // Paoding, IK, Jieba and HanLP) and filter out stop words
    val wordsDs = sample.map(row => {
      val stopwordSet = bc.value

      val cmt = row.getAs[String]("comment")
      val label = row.getAs[Double]("label")

      // HanLP returns a java.util.List[Term]; convert it explicitly with asScala —
      // the implicit scala.collection.JavaConversions API is deprecated
      val terms: util.List[Term] = HanLP.segment(cmt)
      val words = terms.asScala.map(_.word).filter(w => !stopwordSet.contains(w)).toArray

      (label, words)
    }).toDF("label", "words")

    // Term-frequency vectors via the hashing trick
    // (one row in -> one row out, hence transform rather than fit)
    val hashingTF = new HashingTF()
      .setNumFeatures(100000)
      .setInputCol("words")
      .setOutputCol("tf")
    val tfDs = hashingTF.transform(wordsDs)
    //tfDs.show(100,false)

    // Re-weight raw TF counts into TF-IDF feature vectors
    val idf = new IDF()
      .setInputCol("tf")
      .setOutputCol("tfidf")
    val idfModel = idf.fit(tfDs)
    val tfidfDS = idfModel.transform(tfDs).drop("words", "tf")

    // Cached because it is reused below for the split, training and evaluation
    tfidfDS.cache()
    tfidfDS.show(100, false)

    // Split the samples into a 70% training set and a 30% test set
    val Array(train, test) = tfidfDS.randomSplit(Array(0.7, 0.3))

    // Train the Naive Bayes classifier on the TF-IDF features
    val bayes = new NaiveBayes()
      .setFeaturesCol("tfidf")
      .setLabelCol("label")
    val bayesModel = bayes.fit(train)

    // overwrite() so re-running the trainer does not fail on an existing path
    bayesModel.write.overwrite().save("userprofile/data/comment_model/")

    // Score the held-out test set
    val result = bayesModel.transform(test).drop("tfidf")
    result.show(100, false)

    // Correct-prediction count (first row) vs total count (second row)
    val correct = result.selectExpr("count(if(label=prediction,1,null))")
      .union(result.selectExpr("count(1) "))
    correct.show(100, false)

    // Confusion matrix: rows/columns are indexed by the class labels
    val rdd = result.rdd.map(row => {
      val label = row.getAs[Double]("label")
      val prediction = row.getAs[Double]("prediction")
      (prediction, label)
    })
    val matrix: Matrix = new MulticlassMetrics(rdd).confusionMatrix
    println(matrix)

    // Overall model quality. NOTE: the original code used
    // BinaryClassificationEvaluator with areaUnderROC, which is undefined for
    // this 3-class problem (labels 0/1/2, rawPrediction of length 3); the
    // multi-class evaluator is the correct choice here.
    val evaluator = new MulticlassClassificationEvaluator()
      .setMetricName("accuracy")
      .setPredictionCol("prediction")
      .setLabelCol("label")
    val accuracy: Double = evaluator.evaluate(result)
    println(accuracy)

    spark.close()
  }

  /** Reads one comment per line from `path` and attaches the given class label. */
  private def loadSample(spark: SparkSession, path: String, label: Int): Dataset[Row] =
    spark.read.textFile(path)
      .selectExpr("value as comment", s"cast($label as double) as label")
}
