package com.scala.sparkML

import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}


/**
 * Minimal TF-IDF example using Spark MLlib's Tokenizer, HashingTF and IDF
 * on a three-document in-memory corpus. Prints the raw TF vectors and the
 * rescaled TF-IDF feature vectors to stdout.
 */
object TFIDFTest1 {
    def main(args: Array[String]): Unit = {
        // Local 2-thread Spark context — this is a demo, not cluster code.
        val conf = new SparkConf().setAppName("mlTFIDF").setMaster("local[2]")
        val sc = new SparkContext(conf)
        try {
            val sqlContext = new SQLContext(sc)
            import sqlContext.implicits._

            // A tiny corpus: three documents as (label, sentence) pairs.
            val sData = sqlContext.createDataFrame(Seq(
                (0, "hi i love you"),
                (1, "i hope i get a new job which i love"),
                (2, "logistic regression models are neat")
            )).toDF("label", "sentence")

            // Split each sentence into words.
            val tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
            val wordData = tokenizer.transform(sData)
            //wordData.show()

            // Hash terms into a 30-bucket term-frequency vector.
            // NOTE(review): 30 is small — distinct terms can collide into the
            // same bucket; raise numFeatures for anything beyond a demo.
            val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(30)
            val tf = hashingTF.transform(wordData) // raw term frequencies (TF)
            tf.show()

            // Fit IDF over the corpus, then rescale the TF vectors to TF-IDF.
            // In practice the fitted model would be saved (e.g. to HDFS) so it
            // can be reloaded later instead of refit.
            val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
            val idfModel = idf.fit(tf)
            val rescaledData = idfModel.transform(tf)
            rescaledData.select("features", "label").take(3).foreach(println)
        } finally {
            // Always release the Spark context, even if a stage above fails.
            sc.stop()
        }
    }
}











