from pyspark import SparkContext
from pyspark.ml.classification import LogisticRegression, LogisticRegressionModel
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession


def split_line(line):
    """Split a comma-separated record into (label, (x, y)).

    The last field is treated as the label; the first two fields are the
    coordinates, returned as strings (no numeric conversion here).
    """
    fields = line.split(",")
    label = fields[-1]
    coords = (fields[0], fields[1])
    return label, coords


# Train a logistic-regression classifier on 2-D points read from a CSV
# file, then classify a single sample point.
sc = SparkContext()
ss = SparkSession(sparkContext=sc)

# Header-less CSV: Spark names the columns _c0, _c1 (coordinates) and
# _c2 (class label, presumably 'R' vs. something else — confirm with data).
df = ss.read.csv("file:///Users/sonto/Workspace/Rimi/P1902/spark_example/points.txt", sep=",")
df.show()

# Map each row to (label, features): label 1 for 'R', 0 otherwise.
labeled_rdd = df.rdd.map(
    lambda row: (1 if row._c2 == 'R' else 0,
                 Vectors.dense([float(row._c0), float(row._c1)]))
)

# Build the training DataFrame ONCE; the original converted the same RDD
# to a DataFrame twice (toDF() for show, createDataFrame() for fit).
train_df = ss.createDataFrame(labeled_rdd, ['label', 'features'])
train_df.show()

lr = LogisticRegression(maxIter=10, regParam=0.01)
model = lr.fit(train_df)
# model.save("file:///Users/sonto/Workspace/Rimi/P1902/spark_example/ml_lesson1")

# Validate explicitly rather than with `assert`, which is stripped under -O.
if not isinstance(model, LogisticRegressionModel):
    raise TypeError("expected LogisticRegressionModel, got %r" % type(model))

# Score one point; transform() appends rawPrediction/probability/prediction.
predictions = model.transform(
    ss.createDataFrame([(Vectors.dense((0.5, 0.5)),)], ['features'])
)
predictions.show()

# rdd = sc.textFile("file:///Users/sonto/Workspace/Rimi/P1902/spark_example/points.txt")
# points_rdd = rdd.map(split_line).map(lambda pt: LabeledPoint(1 if pt[0] == 'R' else 0, Vectors.dense(pt[1])))
#
# model = LogisticRegressionWithLBFGS.train(points_rdd)
# assert isinstance(model, LogisticRegressionModel)
# print(model.intercept, model.numFeatures)
# print(model.predict((0.6, 0.6)))
