package com.shujia.mlib

import org.apache.spark.ml.feature.{HashingTF, Tokenizer, IDF}
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

object TF_IDF {

  /**
   * TF-IDF demo: reads a text file, segments each line, then computes
   * term-frequency (hashing trick) and inverse-document-frequency
   * weights with Spark ML, printing the resulting DataFrame.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local").setAppName("tf-idf")

    val sc = new SparkContext(conf)

    try {
      val sQLContext = new SQLContext(sc)

      import sQLContext.implicits._

      // Each element of the RDD is one line/document of the corpus.
      val rdd = sc.textFile("spark/data/word.txt")

      // Segment each line with the IK analyzer (presumably Chinese word
      // segmentation — IK is project-local, not visible here; confirm),
      // keeping the original text alongside the space-joined tokens.
      val wordDF = rdd.map(line => {
        val words = IK.fit(line)
        (line, words.mkString(" "))
      }).toDF("text", "words")

      // Tokenizer (whitespace-based, for English-style text) turns the
      // pre-segmented "words" string back into an array column.
      val tok = new Tokenizer()
        .setInputCol("words")
        .setOutputCol("feature")

      val featureDF = tok.transform(wordDF)

      // Term frequency via the hashing trick. The default numFeatures is
      // kept deliberately: a small value (e.g. 1000) risks hash collisions.
      val tf = new HashingTF()
        .setInputCol("feature")
        .setOutputCol("tf")

      val tfDF = tf.transform(featureDF)

      // Inverse document frequency: fit over the whole corpus, then
      // rescale the raw TF vectors into TF-IDF weights.
      val idf = new IDF()
        .setInputCol("tf")
        .setOutputCol("tf-idf")

      val model = idf.fit(tfDF)

      val idfDF = model.transform(tfDF)

      // show(false): do not truncate the (long) vector columns.
      idfDF.show(false)
    } finally {
      // Always release the SparkContext so the local JVM shuts down cleanly,
      // even if any stage above throws.
      sc.stop()
    }
  }
}
