package com.scala.sparkML

import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.sql.SparkSession

/** One product record parsed from a CSV line: id, company name, direction, free-text description. */
final case class Product(id: String, companyName: String, direction: String, productInfo: String)
object TFIDFTest2 {

    /**
     * Entry point: computes TF-IDF features for product descriptions.
     *
     * Pipeline: read CSV-like lines from "dataFrametext" -> parse into [[Product]]
     * -> tokenize `productInfo` -> hash tokens into a 30-bucket term-frequency
     * vector -> fit and apply IDF -> show (id, companyName, features).
     * The fitted IDF model is persisted to "idfModelData".
     *
     * @param args unused command-line arguments
     */
    def main(args: Array[String]): Unit = {

        val spark = SparkSession
            .builder()
            .appName("TFIDF@2")
            .master("local")
            //.config("spark.sql.warehouse.dir", "C:\\Users\\Administrator\\Desktop\\spark-warehouse")
            .getOrCreate()

        try {
            // Enable implicit conversions such as RDD -> Dataset (.toDS()).
            import spark.implicits._

            // Parse "id,companyName,direction,productInfo" lines.
            // split(",", -1) keeps trailing empty fields (default limit 0 would
            // drop them and break the indexing); rows with fewer than 4 fields
            // are skipped instead of throwing ArrayIndexOutOfBoundsException.
            val products = spark.sparkContext.textFile("dataFrametext")
                .map(_.split(",", -1))
                .filter(_.length >= 4)
                .map(d => Product(d(0), d(1), d(2), d(3)))
                .toDS()
                .cache()

            // Tokenize the raw product description into lowercase words.
            val productData = new Tokenizer()
                .setInputCol("productInfo")
                .setOutputCol("productWords")
                .transform(products)

            // Hash words into a fixed-size (30) term-frequency vector.
            val tf = new HashingTF()
                .setNumFeatures(30)
                .setInputCol("productWords")
                .setOutputCol("productFeatures")
                .transform(productData)

            // Fit IDF weights over the corpus, persist the model, then rescale TF vectors.
            val idfModel = new IDF()
                .setInputCol("productFeatures")
                .setOutputCol("features")
                .fit(tf)
            idfModel.write.overwrite().save("idfModelData")

            val idfData = idfModel.transform(tf)
            idfData.select("id", "companyName", "features").show()
        } finally {
            // Bug fix: the original never stopped the session, leaking the
            // local Spark context and its resources on exit.
            spark.stop()
        }
    }
}












