from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import  HashingTF,Tokenizer

from pyspark import SparkContext,SparkConf
from pyspark.sql import SparkSession
# Create (or reuse) the shared SparkSession.
# Fixes two bugs in the original line:
#   1. `getOrCreate` was referenced without parentheses, so `spark` was the
#      bound method itself, not a SparkSession — every later
#      `spark.createDataFrame(...)` call would fail.
#   2. `config(SparkConf())` passed the SparkConf positionally into the
#      `key` parameter; it must be given as the `conf=` keyword argument.
spark = SparkSession.builder.config(conf=SparkConf()).getOrCreate()


# Import the required packages and build the training dataset.
# Prepare training documents from a list of (id, text, label) tuples.


# Training corpus: (id, text, label) rows. Label 1.0 marks the documents
# that contain the token "spark"; 0.0 marks those that do not.
train_rows = [
    (0, "a b c d e spark", 1.0),
    (1, "b d", 0.0),
    (2, "spark f g h", 1.0),
    (3, "hadoop mapreduce", 0.0),
]
training = spark.createDataFrame(train_rows, ["id", "text", "label"])

# Build the ML pipeline stages: tokenizer -> hashingTF -> logistic regression.
# Tokenizer splits "text" into words; HashingTF hashes those words into a
# fixed-size feature vector; LogisticRegression is the final estimator.
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
lr = LogisticRegression(maxIter=10, regParam=0.001)

# Assemble the stages, in processing order, into a single Pipeline.
stages = [tokenizer, hashingTF, lr]
pipeline = Pipeline(stages=stages)

# A Pipeline is itself an Estimator: fit() runs each stage over the training
# data and produces a PipelineModel (a Transformer) used to score test data.
model = pipeline.fit(training)

# (4) Build the test data.

# test = spark.createDataFrame([
#     (4, "spark i j k"),
#     (5, "l m n"),
#     (6, "spark hadoop spark"),
#     (7, "apache hadoop")],
#     ["id", "text"])
#
# # (5) Call transform() on the fitted PipelineModel so the test data flows
# #     through the pipeline stages in order, producing predictions.
# prediction = model.transform(test)
# selected = prediction.select("id", "text", "probability", "prediction")
# for row in selected.collect():
#     rid, text, prob, prediction = row
#     print("(%d, %s) --> prob=%s, prediction=%f" % (rid, text, str(prob), prediction))
