package bayes

import scala.reflect.runtime.universe
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.feature._
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
  * Created by hunter.coder 涛哥  
  * 2019/4/25 12:42
  * 交流qq:657270652
  * Version: 1.0
  * 更多学习资料：https://blog.csdn.net/coderblack/
  * Description:
  **/

/** One parsed input record: document id, class label (as a numeric string), and space-delimited text. */
case class RawDataRecord(docid:String,category: String, text: String)

object BayesDemo {

  /**
    * Demo pipeline: text file -> tokenize -> TF (hashing) -> TF-IDF -> NaiveBayes.
    *
    * Input file format: one document per line, "docid,category,text",
    * where text is space-delimited (pre-segmented) words.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("bayes").master("local").getOrCreate()
    import spark.implicits._

    // Load the raw data.
    val rdd = spark.read.textFile("G:\\data_shark\\doit_recommender\\src\\test\\data\\1.txt")

    // Parse each line into a case class (doc id, category, text).
    // Limit the split to 3 fields so commas inside the text body are preserved,
    // and a malformed short line fails loudly instead of silently mis-parsing.
    val catDs = rdd.map { line =>
      val split = line.split(",", 3)
      RawDataRecord(split(0), split(1), split(2))
    }

    // Split the (already space-delimited) text into a "words" array column.
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val wordDf: DataFrame = tokenizer.transform(catDs)
    /* columns: docid, category, text, words */

    println("----------wordDf----------")
    wordDf.printSchema()

    /**
      * Map each word to an Int bucket and count its term frequency (TF) per document.
      * HashingTF hashes words into buckets, similar in spirit to a Bloom filter.
      * setNumFeatures(100) uses 100 buckets; the default is 2^20 = 1048576.
      * More buckets lower the collision probability (more accurate features)
      * at the cost of memory — tune to your vocabulary size.
      */
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("tf").setNumFeatures(100)
    val featurizedDf = hashingTF.transform(wordDf)
    featurizedDf.show(10, false)
    /**
      * +--------+-----------------+---------------------+------------------------------+
      * |category|text             |words                |tf [SparseVector]             |
      * +--------+-----------------+---------------------+------------------------------+
      * |0       |苹果 官网 苹果 宣布 |[苹果, 官网, 苹果, 宣布]|(100,[8,60,85],[1.0,2.0,1.0]) |
      * |1       |苹果 梨 香蕉       |[苹果, 梨, 香蕉]       |(100,[29,60,65],[1.0,1.0,1.0])|
      * +--------+-----------------+---------------------+------------------------------+
      * Bucket 60 holds "苹果" with a term frequency of 2.0 in the first row.
      */

    // Compute TF-IDF. The output column is named "features" (fixes the original
    // code, which wrote "tfidf" but then selected a nonexistent "features"
    // column below); it also matches NaiveBayes's default featuresCol.
    val idf = new IDF().setInputCol("tf").setOutputCol("features")
    val iDFModel: IDFModel = idf.fit(featurizedDf)
    val idfDf: DataFrame = iDFModel.transform(featurizedDf)

    idfDf.printSchema()
    idfDf.show(10,false)

    /**
      * With only two documents, a word appearing in both (e.g. "苹果") has
      * IDF = ln(3/3) = 0, so its TF-IDF value is 0; the remaining terms get
      * a positive weight.
      */

    // Convert to the (label, features) shape that NaiveBayes expects.
    // The hashed TF-IDF vector can be used directly as a sparse vector;
    // densifying it (as the original did) only wastes memory.
    val trainDs = idfDf.select('category, 'features).rdd.map {
      row =>
        val label = row.getAs[String]("category")
        val features = row.getAs[org.apache.spark.ml.linalg.Vector]("features")
        LabeledPoint(label.toDouble, features)
    }.toDS()

    trainDs.show(10,false)

    // Train on the full data set, then predict on the same rows (demo only —
    // a real pipeline would hold out a test split).
    val model = new NaiveBayes().fit(trainDs)

    model.transform(idfDf.select('features)).show(10,false)

    spark.close()

  }

}
