package bayes

import com.hankcs.hanlp.HanLP
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF}
import org.apache.spark.sql.SparkSession

/**
  * Created by hunter.coder 涛哥  
  * 2019/5/6 11:25
  * 交流qq:657270652
  * Version: 1.0
  * 更多学习资料：https://blog.csdn.net/coderblack/
  * Description:   朴素贝叶斯算法模型训练
  **/
object NaiveBayesModuleTrainner {

  /**
    * Trains a Naive Bayes text classifier on pre-segmented, labeled comments,
    * reports its accuracy on a held-out split, persists the model, and then
    * scores a fresh batch of raw comments segmented with HanLP.
    *
    * Input training lines have the form `<label>\u0001<space-separated words>`.
    */
  def main(args: Array[String]): Unit = {

    Logger.getLogger("org").setLevel(Level.WARN)
    val spark = SparkSession.builder().appName("bayes_dp").master("local").getOrCreate()

    import spark.implicits._
    // Explicit converters instead of the deprecated implicit JavaConversions.
    import scala.collection.JavaConverters._

    // Parse "<label>\001<words>" lines; drop malformed lines missing the separator.
    val ds = spark.read.textFile("G:\\testdata\\comment\\all")
      .filter(line => line.split("\001").length > 1)
      .map(line => {
        val arr = line.split("\001")
        (arr(0).toDouble, arr(1).split(" "))
      }).toDF("label", "words")

    ds.show(10, false)

    // TF hashing followed by IDF weighting -> TF-IDF feature vectors.
    val tf = new HashingTF().setInputCol("words").setOutputCol("tf")
    val tf_ds = tf.transform(ds).drop("words")

    val idf = new IDF().setInputCol("tf").setOutputCol("feature")
    val idfModel = idf.fit(tf_ds)
    val tfidf_ds = idfModel.transform(tf_ds).drop("tf")

    // Split the samples into a training set and a test set.
    val Array(dsTrain, dsTest) = tfidf_ds.randomSplit(Array(0.8, 0.2))

    dsTrain.show(10, false)

    // Fit the Naive Bayes classifier on the training split.
    val bayes = new NaiveBayes().setFeaturesCol("feature").setLabelCol("label")
    val bayesModel: NaiveBayesModel = bayes.fit(dsTrain)

    // Persist the trained model.
    bayesModel.save("G:\\testdata\\comment\\bayesmodule")

    // Evaluate prediction accuracy on the held-out test split.
    val predictionDs = bayesModel.transform(dsTest)
    predictionDs.select("label", "prediction").show(20, false)

    // Count of correct predictions / total test rows = accuracy.
    val correctNum = predictionDs.where("label = prediction").count()
    val testTotal = dsTest.count()
    // Guard against an empty split; use floating-point division
    // (Long / Long would truncate the ratio to 0).
    if (testTotal > 0) {
      println(s"accuracy = ${correctNum.toDouble / testTotal}")
    }

    // Score new, raw comments.
    val rateDs = spark.read.option("header", "true")
      .csv("G:\\data_shark\\doit_recommender\\src\\test\\java\\cb_rec\\u.comment.dat")

    // Segment each comment into words with HanLP.
    val wordsDf = rateDs.map(row => {
      val gid = row.getAs[String]("gid")
      val pid = row.getAs[String]("pid")
      val comment = row.getAs[String]("comment")
      (gid, pid, HanLP.segment(comment).asScala.map(_.word).toArray)
    }).toDF("gid", "pid", "words")

    val tfDf = tf.transform(wordsDf)

    // Reuse the IDF model fitted on the TRAINING corpus. Refitting IDF on the
    // new data (the original bug) would produce features in a different
    // weighting space than the one the classifier was trained in.
    // Result columns: gid, pid, feature
    val tfidfDf = idfModel.transform(tfDf).drop("words").drop("tf")

    val result = bayesModel.transform(tfidfDf).select("gid", "pid", "prediction")

    result.write.json("G:\\testdata\\comment\\predict")

    spark.close()
  }
}
