package cn.doitedu.ml.doit13.profile

import java.util

import cn.doitedu.commons.util.SparkUtil
import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.Term
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.feature.{HashingTF, IDF}
import org.apache.spark.sql.{DataFrame, Dataset, Row}

/**
 * Trains a Naive Bayes sentiment-classification model on user product-review comments.
 * Sentiment classes: good (label 0.0), neutral (1.0), bad (2.0).
 *
 * Pipeline: load labeled raw text -> HanLP tokenization + stop-word removal
 * -> HashingTF -> IDF -> NaiveBayes -> accuracy evaluation on a held-out split.
 */
object CommentClassifyModelTrain {

  def main(args: Array[String]): Unit = {

    Logger.getLogger("org").setLevel(Level.WARN)
    val spark = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._

    // Load the three labeled sample sets; each line is one comment.
    // label: 0.0 = good review, 1.0 = neutral review, 2.0 = bad review.
    val good = spark.read.textFile("F:\\testdata\\comment\\good").selectExpr("value as cmt","cast(0.0 as double) as label")
    val general = spark.read.textFile("F:\\testdata\\comment\\general").selectExpr("value as cmt","cast(1.0 as double) as label")
    val poor = spark.read.textFile("F:\\testdata\\comment\\poor").selectExpr("value as cmt","cast(2.0 as double) as label")

    // Merge the three sets into one corpus; cached because it is scanned
    // again below for tokenization.
    val sample = good.union(general).union(poor)
    sample.show(30, false)
    sample.cache()

    // Load the stop-word dictionary and broadcast it so every executor task
    // shares one copy instead of serializing the set per record.
    val stopWordsSet = spark.read.textFile("userprofile/data/demo/stopwords").rdd.collect().toSet
    val bc = spark.sparkContext.broadcast(stopWordsSet)

    // Tokenize each Chinese comment with HanLP and drop stop words,
    // producing (words: Array[String], label: Double) rows.
    val wordsSample = sample.rdd.map(row => {

      val stopwords: Set[String] = bc.value

      // Pull the two fields out of the row.
      val cmt = row.getAs[String]("cmt")
      val label = row.getAs[Double]("label")

      // Segment the comment into terms.
      val terms: util.List[Term] = HanLP.segment(cmt)
      // Explicit JavaConverters (asScala) instead of the deprecated implicit
      // scala.collection.JavaConversions.
      import scala.collection.JavaConverters._
      // Extract the word strings and filter out stop words.
      val words = terms.asScala.map(_.word).toArray.filter(w => !stopwords.contains(w))

      (words, label)
    }).toDF("words", "label")

    wordsSample.show(20, false)

    // Vectorize the word arrays: hash each word into a fixed-size
    // term-frequency vector (hashing trick, 100k buckets) ...
    val tf = new HashingTF()
        .setInputCol("words")
        .setNumFeatures(100000)
        .setOutputCol("tf_vec")
    val tfVecs = tf.transform(wordsSample)

    // ... then rescale raw term frequencies into TF-IDF weights.
    val idf = new IDF()
        .setInputCol("tf_vec")
        .setOutputCol("tf_idf_vec")
    val idfModel = idf.fit(tfVecs)
    val tfidfVecs: DataFrame = idfModel.transform(tfVecs)
    //tfidfVecs.show(20,false)

    // Don't train on the whole sample set: hold out 20% for evaluation.
    // A fixed seed makes the split (and the reported accuracy) reproducible.
    val Array(train, test) = tfidfVecs.randomSplit(Array(0.8, 0.2), seed = 42L)

    // Train a multinomial Naive Bayes classifier with Laplace smoothing.
    val bayes = new NaiveBayes()
        .setLabelCol("label")
        .setFeaturesCol("tf_idf_vec")
        .setSmoothing(1.0)
        .setModelType("multinomial")
    val model = bayes.fit(train)

    // Evaluate prediction quality on the held-out set; drop the bulky
    // feature columns before displaying.
    val predict = model.transform(test).drop("words", "tf_vec", "tf_idf_vec")
    predict.show(10, false)
    predict.printSchema()

    // Accuracy = correctly predicted rows / total test rows.
    val total = predict.count()
    val correct = predict.where("label=prediction").count()

    println("total：   " + total)
    println("correct：   " + correct)
    println("准确率：   " + correct / total.toDouble)

    // Persist the trained model (disabled by default).
    //model.save("userprofile/data/cmt_bayes_model")

    spark.close()
  }
}
