package cn.doitedu.ml

import cn.doitedu.commons.utils.SparkUtil
import com.hankcs.hanlp.HanLP
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.feature.{HashingTF, IDF}
import org.apache.spark.sql.Dataset

/**
 * Trains a naive-bayes sentiment classifier for product comments
 * (0.0 = neutral, 1.0 = good, 2.0 = poor) on TF-IDF features built
 * from HanLP-segmented review text, then reports test-set accuracy.
 *
 * @author hunter@doitedu.cn
 * @since 2020/08/16 17:36
 */
object CommentClassifyModelTrain {
  def main(args: Array[String]): Unit = {

    val spark = SparkUtil.getSparkSession("语义分类分析朴素贝叶斯模型训练")
    import spark.implicits._

    // Load the three labelled comment corpora; label encoding: 0.0 = neutral,
    // 1.0 = good, 2.0 = poor. Each is split 80/20 into train/test.
    val general = spark.read.textFile("F:\\testdata\\comment\\general").selectExpr("cast(0.0 as double) as label", "value")
    val Array(generalSample, generalTest) = general.randomSplit(Array(0.8, 0.2))

    val good = spark.read.textFile("F:\\testdata\\comment\\good").selectExpr("cast(1.0 as double)  as label", "value")
    val Array(goodSample, goodTest) = good.randomSplit(Array(0.8, 0.2))

    val poor = spark.read.textFile("F:\\testdata\\comment\\poor").selectExpr("cast(2.0 as double)  as label", "value")
    val Array(poorSample, poorTest) = poor.randomSplit(Array(0.8, 0.2))

    // Merge the per-class splits into one training and one test dataset.
    val sample = generalSample.union(goodSample).union(poorSample)
    val test = generalTest.union(goodTest).union(poorTest)

    // Load the stop-word list on the driver and broadcast it to executors.
    // (collect().toSeq broadcasts a plain Scala collection instead of a
    // java.util.List, avoiding the deprecated JavaConversions implicits.)
    val stopwords: Seq[String] = spark.read.textFile("portrait/testdata/stopwords").collect().toSeq
    val bc = spark.sparkContext.broadcast(stopwords)

    // Tokenizer shared by train AND test: HanLP segmentation + stop-word
    // removal. The original filtered stop words only on the training side,
    // which skews the test features.
    val segment = (s: String, stop: Seq[String]) => {
      import scala.collection.JavaConverters._
      HanLP.segment(s).asScala.map(_.word).filterNot(stop.contains).toArray
    }

    // Tokenize the training set into (label, words).
    val sampleWords = sample.rdd.map(row => {
      val label = row.getAs[Double]("label")
      val words = segment(row.getAs[String]("value"), bc.value)
      (label, words)
    }).toDF("label", "words")
    sampleWords.show(20, false)

    // Hash the word arrays into fixed-size term-frequency vectors.
    val hashingTF = new HashingTF()
      .setInputCol("words")
      .setNumFeatures(100000)
      .setOutputCol("tfvec")
    val tfVecs = hashingTF.transform(sampleWords)

    // Fit IDF on the TRAINING data only; this fitted model must be reused
    // for the test set so both live in the same feature space.
    val idf = new IDF()
      .setInputCol("tfvec")
      .setOutputCol("idfvec")
    val idfModel = idf.fit(tfVecs)

    val tfidfVecs = idfModel.transform(tfVecs).drop("words", "tfvec")
    tfidfVecs.show(10, false)

    // Train the naive-bayes classifier on the TF-IDF training vectors.
    val naiveBayes = new NaiveBayes()
      .setSmoothing(1.0)
      .setFeaturesCol("idfvec")
      .setLabelCol("label")
    val model = naiveBayes.fit(tfidfVecs)

    // Tokenize the test set with the SAME segmentation + stop-word filter.
    val testWords = test.rdd.map(row => {
      val label = row.getAs[Double]("label")
      val words = segment(row.getAs[String]("value"), bc.value)
      (label, words)
    }).toDF("label", "words")

    // BUG FIX: reuse the training hashingTF and the IDF model fitted on the
    // training data. The original fitted a second IDF on the test set, which
    // both leaks test statistics into the features and produces vectors in a
    // different space than the one the classifier was trained in.
    val testTf = hashingTF.transform(testWords)
    val testIdf = idfModel.transform(testTf).drop("words", "tfvec")

    // Predict on the vectorized test set and report accuracy.
    val predict = model.transform(testIdf)

    val total = predict.count()
    val correct = predict.where("label = prediction").count()
    println("准确率 ==>  ==>  ==> " + (correct.toDouble / total))

    spark.close()
  }

}
