package cn.doitedu.sparkml.bayes

import cn.doitedu.commons.utils.SparkUtil
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * @author: 余辉
  * @blog: https://blog.csdn.net/silentwolfyh
  * @create: 2019/10/21
  * @description:
  * 需求：文本分类，bayes训练模型demo
  *
  * 步骤：
  * 1、spark读取测试数据获取Schema
  * 2、将doc列通过Tokenizer类进行切分，通过HashingTF类提取词频，通过IDF类预处理
  * 3、最后通过NaiveBayes类将label和doc的训练列传输
  * 4、保存训练结果
  *
  **/
object BayesTest {

  /**
    * Entry point: trains a NaiveBayes text classifier on the demo sample
    * (columns "label" and "doc") and persists the fitted model.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkUtil.getSparkSession(this.getClass.getName)

    // CSV columns are read as strings when no schema is inferred; NaiveBayes
    // requires a numeric label column, so cast "label" to double up front.
    val raw = spark.read.option("header", true).csv("rec_system/demodata/bayes_demo_data/sample.txt")
    val file = raw.withColumn("label", raw("label").cast("double"))

    // Step 1: split the raw "doc" text into a "words" array column.
    val tokenizer: Tokenizer = new Tokenizer().setInputCol("doc").setOutputCol("words")
    val wordDF: DataFrame = tokenizer.transform(file)

    // Step 2: hash words into a fixed-size (100-dimensional) term-frequency vector.
    val tf: HashingTF = new HashingTF().setInputCol("words").setOutputCol("tf").setNumFeatures(100)
    val tfDF = tf.transform(wordDF)

    // Step 3: re-weight term frequencies by inverse document frequency.
    val idf: IDF = new IDF().setInputCol("tf").setOutputCol("idf")
    val idfDF: DataFrame = idf.fit(tfDF).transform(tfDF)

    // Step 4: configure and train the classifier on the "idf" feature column.
    val bayes: NaiveBayes = new NaiveBayes().setLabelCol("label").setFeaturesCol("idf").setSmoothing(0.1)

    // BUG FIX: the original called `bayes.write.save("")`, which saved the
    // *untrained* estimator to an empty path. Fit first, then persist the
    // trained model; overwrite() makes the demo re-runnable.
    val model: NaiveBayesModel = bayes.fit(idfDF)
    model.write.overwrite().save("rec_system/demodata/bayes_demo_data/bayes_model")

    spark.close()
  }
}
