package com.shujia.mllib

import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
import org.apache.spark.ml.feature.IDF

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

object TF_IDF {

  /**
   * Demo pipeline: read lines from `data/word.txt`, segment each line into
   * words, hash the words into sparse term-frequency vectors with
   * [[HashingTF]], then weight them with [[IDF]] to produce TF-IDF features.
   * Intermediate DataFrames are printed with `show()` at each stage.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("TF_IDF").setMaster("local")
    val sc = new SparkContext(conf)
    val sQLContext = new SQLContext(sc)
    import sQLContext.implicits._

    try {
      val lines = sc.textFile("data/word.txt")

      // Segment each line into words and convert to a DataFrame.
      // NOTE(review): `IK` is a project-local segmenter (presumably an
      // IKAnalyzer-style Chinese word segmenter) defined elsewhere in this
      // package — confirm it returns a collection of token strings.
      val dataDF = lines.map(line => {
        val words = IK.fit(line)
        (line, words.mkString(" "))
      }).toDF("line", "words")

      dataDF.show()

      // Tokenizer: whitespace/English-style tokenizer. It re-splits the
      // space-joined segmented words back into an array column "feature".
      val tok = new Tokenizer()
        .setInputCol("words")
        .setOutputCol("feature")

      val tokDF = tok.transform(dataDF)
      tokDF.show()

      // HashingTF: map each token array to a sparse term-frequency vector.
      val tfModel = new HashingTF()
        .setInputCol("feature")
        .setOutputCol("tf")

      val tfDF = tfModel.transform(tokDF)

      // show(false) disables column truncation so the full vectors print.
      tfDF.show(false)

      // Compute TF-IDF: fit the IDF weighting on the TF vectors, then apply it.
      val idf = new IDF()
        .setInputCol("tf")
        .setOutputCol("tf-idf")

      val idfModel = idf.fit(tfDF)

      val idfDF = idfModel.transform(tfDF)

      idfDF.show(false)
    } finally {
      // Bug fix: the original never stopped the SparkContext, leaking the
      // local Spark runtime (threads, UI port, temp dirs) on every run.
      sc.stop()
    }
  }
}
