from pyspark import SparkContext
from pyspark.ml.linalg import Vectors
from pyspark.ml.classification import LogisticRegression, LogisticRegressionModel
from pyspark.sql import SparkSession

# Obtain (or reuse) a SparkSession via the idiomatic builder entry point,
# rather than constructing one directly from a SparkContext.
spark = SparkSession.builder.getOrCreate()

# Prepare training data from a list of (label, features) tuples.
training = spark.createDataFrame([
    (1.0, Vectors.dense([0.0, 1.1, 0.1])),
    (0.0, Vectors.dense([2.0, 1.0, -1.0])),
    (0.0, Vectors.dense([2.0, 1.3, 1.0])),
    (1.0, Vectors.dense([0.0, 1.2, -0.5]))], ["label", "features"])

# Held-out rows used only for scoring the fitted model below.
test_set = spark.createDataFrame([(1.0, Vectors.dense([0.5, 0.5, 0.5])),
                                  (1.0, Vectors.dense([0.0, 1.2, -0.5]))],
                                 ['label', 'features'])

# Create a LogisticRegression instance. This instance is an Estimator.
lr = LogisticRegression(maxIter=10, regParam=0.01)
# Print out the parameters, documentation, and any default values.
print("LogisticRegression parameters:\n" + lr.explainParams() + "\n")

# Learn a LogisticRegression model. This uses the parameters stored in lr.
model1 = lr.fit(training)
assert isinstance(model1, LogisticRegressionModel)

# The fitted model is a Transformer: transform() scores the held-out rows,
# producing a DataFrame with added prediction-related columns.
prediction = model1.transform(test_set)
print(type(prediction))
prediction.show()

# Release driver/cluster resources now that the example is finished.
spark.stop()