from pyspark.sql import SparkSession

# Single-process local Spark session for this example.
# NOTE(review): the app name says 'WordCount' but the script trains a text
# classifier — consider renaming (kept as-is to preserve behavior).
spark = (
    SparkSession.builder
    .master('local')
    .appName('WordCount')
    .getOrCreate()
)

from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF,Tokenizer

# Labeled toy corpus: label 1.0 for texts containing "spark", 0.0 otherwise.
training_rows = [
    (0, "some other words spark", 1.0),
    (1, "such as are", 0.0),
    (2, "some else but spark", 1.0),
    (3, "Java Python Go C#", 0.0),
]
training = spark.createDataFrame(training_rows, ["id", "text", "label"])

# Peek at the training data.
training.show()


# Three-stage text-classification pipeline:
# raw text -> tokens -> hashed term-frequency features -> logistic regression.
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hasher = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
classifier = LogisticRegression(maxIter=10, regParam=0.001)

# Fit all stages on the labeled training data in one call.
model = Pipeline(stages=[tokenizer, hasher, classifier]).fit(training)

# Unlabeled texts to score with the fitted model.
test_rows = [
    (4, "like text spark do"),
    (5, "Docker k8s lua"),
    (6, "apache spark Tomcat"),
    (7, "apache hadoop Servlet spark spark"),
]
test = spark.createDataFrame(test_rows, ["id", "text"])

# Peek at the test data.
test.show()


# Score the unlabeled texts, then print each row's id, text, class
# probability vector, and predicted label.
prediction = model.transform(test)
selected = prediction.select('id', 'text', 'probability', 'prediction')
for row in selected.collect():
    # Unpack into `predicted_label` rather than `prediction`: the original
    # code rebound `prediction` here on every iteration, shadowing the
    # DataFrame of the same name created above.
    rid, text, probability, predicted_label = row
    print("(%d,%s) probability=%s,prediction=%f" % (rid, text, str(probability), predicted_label))