package bayes

import com.hankcs.hanlp.HanLP
import org.apache.commons.lang3.StringUtils
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.{SparseVector, Vectors}
import org.apache.spark.mllib.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import scala.collection.mutable

/**
  * Created by hunter.coder 涛哥
  * 2019/4/21 11:51
  * 交流qq:657270652
  * Version: 1.0
  * 更多学习资料：https://blog.csdn.net/coderblack/
  * Description: 评论文本分析bayes模型训练器
  **/
/**
  * One labeled training/test record.
  *
  * @param label class label as a string: "2" = good, "1" = neutral, "0" = bad review
  * @param text  space-joined, stop-word-filtered segmented comment text
  */
final case class RawData(label: String, text: String)

/**
  * Trains a multinomial Naive Bayes sentiment model on segmented Chinese
  * product-review comments (good / neutral / bad), saves the model, and then
  * evaluates its accuracy on a held-out 30% test split.
  *
  * Pipeline: HanLP segmentation -> stop-word filtering -> Tokenizer ->
  * HashingTF -> IDF -> mllib LabeledPoint -> NaiveBayes.train.
  */
object BayesModuleTrainer {

  def main(args: Array[String]): Unit = {

    // Input paths for the three training classes and the model output path.
    val goodPath = "G:\\data_shark\\testdata\\comment\\good"
    val generalPath = "G:\\data_shark\\testdata\\comment\\general"
    val poorPath = "G:\\data_shark\\testdata\\comment\\poor"
    val modelSavePath = "G:\\data_shark\\testdata\\comment\\model"

    val spark = SparkSession.builder().appName("bayes").master("local").getOrCreate()
    import spark.implicits._
    // Explicit JavaConverters (.asScala) instead of the deprecated implicit
    // JavaConversions, to bridge HanLP's java.util.List results.
    import scala.collection.JavaConverters._

    // Stop-word dictionary, broadcast so every executor shares a single copy.
    val stopWords = Set("在", "的", "了", "我", "和", "，", "。", "也", "是", "都")
    val bc = spark.sparkContext.broadcast(stopWords)

    // Segment each raw comment line with HanLP, drop stop words, and join the
    // remaining tokens with spaces (the format Tokenizer expects).
    def segmentAndClean(lines: RDD[String]): RDD[String] =
      lines.map { line =>
        HanLP.segment(line).asScala
          .map(_.word)
          .filterNot(bc.value.contains)
          .mkString(" ")
      }

    // Load and segment the three classes of training data.
    val goodSegRDD = segmentAndClean(spark.sparkContext.textFile(goodPath))
    val generalSegRDD = segmentAndClean(spark.sparkContext.textFile(generalPath))
    val poorSegRDD = segmentAndClean(spark.sparkContext.textFile(poorPath))

    // 70/30 train/test split per class; fixed seed for reproducibility.
    val Array(trainGood, testGood) = goodSegRDD.randomSplit(Array(0.7, 0.3), 1234L)
    val Array(trainGeneral, testGeneral) = generalSegRDD.randomSplit(Array(0.7, 0.3), 1234L)
    val Array(trainPoor, testPoor) = poorSegRDD.randomSplit(Array(0.7, 0.3), 1234L)

    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")

    // Attach a class label to each segmented line and tokenize the text.
    def labelAndTokenize(data: RDD[String], label: String): DataFrame =
      tokenizer.transform(data.map(RawData(label, _)).toDS())

    // Union the three labeled training sets ("2" = good, "1" = neutral, "0" = bad).
    val words = labelAndTokenize(trainGood, "2")
      .union(labelAndTokenize(trainGeneral, "1"))
      .union(labelAndTokenize(trainPoor, "0"))
    words.select('label, 'words).show(10, false)

    // Term frequency via feature hashing into a 10k-dimensional space.
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("tf").setNumFeatures(10000)
    val tfData = hashingTF.transform(words)
    tfData.show(10, false)

    // IDF is fitted on the training corpus only, then reused on the test set
    // below so both sets share one vocabulary weighting.
    val idf = new IDF().setInputCol("tf").setOutputCol("tf_idf")
    val idfModel: IDFModel = idf.fit(tfData)
    val tfIdfData: DataFrame = idfModel.transform(tfData)
    tfIdfData.printSchema()
    tfIdfData.show(10, false)

    // Convert (label, tf_idf) rows into mllib LabeledPoints. Vectors.fromML
    // accepts the ml SparseVector directly, keeping it sparse instead of the
    // original toArray densification (10k doubles per row).
    def toLabeledPoints(df: DataFrame): RDD[LabeledPoint] =
      df.select('label, 'tf_idf).rdd.map { case Row(label: String, tfidf: SparseVector) =>
        LabeledPoint(label.toDouble, org.apache.spark.mllib.linalg.Vectors.fromML(tfidf))
      }

    val trainingData: RDD[LabeledPoint] = toLabeledPoints(tfIdfData)
    trainingData.take(1).foreach(println)

    // Train the multinomial Naive Bayes model (smoothing lambda = 1.0) and
    // persist it for the predictor side.
    val model: NaiveBayesModel = NaiveBayes.train(trainingData, 1.0, "multinomial")
    model.save(spark.sparkContext, modelSavePath)

    /**
      * Evaluate prediction accuracy on the held-out test split.
      * NOTE: the previous version called spark.close() and sys.exit(0) right
      * after saving the model, which made this entire section unreachable
      * dead code — the evaluation now actually runs.
      */
    println("----------------准备测试数据--------------------")
    val tsWords = labelAndTokenize(testGood, "2")
      .union(labelAndTokenize(testGeneral, "1"))
      .union(labelAndTokenize(testPoor, "0"))

    // Reuse the training-time hashing and the fitted IDF model on test data.
    val tsTfIdfData: DataFrame = idfModel.transform(hashingTF.transform(tsWords))
    val testData: RDD[LabeledPoint] = toLabeledPoints(tsTfIdfData)

    val testPredictionAndLabel = testData.map(p => (model.predict(p.features), p.label))

    // Classification accuracy = correct predictions / total test records.
    val testAccuracy =
      1.0 * testPredictionAndLabel.filter { case (pred, actual) => pred == actual }.count() / testData.count()
    println("accuracy：")
    println(testAccuracy)

    spark.close()
  }

}
