"""TF-IDF feature extraction demo: tokenize sentences, hash term
frequencies, then rescale with inverse document frequency (Spark ML)."""

from pyspark.ml.feature import HashingTF, IDF, Tokenizer
from pyspark.sql import SparkSession

# Local single-JVM session; sufficient for this small in-memory demo.
spark = SparkSession.builder.master('local').appName('IDF').getOrCreate()

# Toy corpus: (label, sentence). Column names passed directly to
# createDataFrame instead of a separate toDF() rename step.
sentenceData = spark.createDataFrame([
    (0, 'I heard about Spark and Spark Spark Spark'),
    (0, 'Java Java Java Python C# C#'),
    (1, 'Logistic regression models are neat'),
], ['label', 'sentence'])

# Stage 1: split each sentence into lowercase word tokens.
tokenizer = Tokenizer(inputCol='sentence', outputCol='words')
wordsData = tokenizer.transform(sentenceData)
wordsData.show()

# Stage 2: hash each token list into a fixed-size (2000-bucket) sparse
# term-frequency vector. Collisions are possible but rare at this size.
hashingTF = HashingTF(inputCol='words', outputCol='rawFeatures', numFeatures=2000)
featurizedData = hashingTF.transform(wordsData)
featurizedData.select('words', 'rawFeatures').show(truncate=False)

# Stage 3: fit IDF weights over the corpus and rescale the raw term
# frequencies, down-weighting terms that appear in many documents.
idf = IDF(inputCol='rawFeatures', outputCol='features')
idfModel = idf.fit(featurizedData)
rescaledData = idfModel.transform(featurizedData)
rescaledData.select('features', 'label').show(truncate=False)

# Release the JVM-backed session now that all output has been shown.
spark.stop()