from pyspark import SparkContext
from pyspark.mllib.classification import LogisticRegressionWithSGD, LogisticRegressionWithLBFGS
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree

from liuqi.machine_learning.point import CLASS_A, CLASS_B
import os


# PySpark reads PYSPARK_PYTHON (not "SPARK_PYTHON") to choose the Python
# executable for driver and workers; it must be set before the SparkContext
# is created (it is — the context is built on the next line).
os.environ["PYSPARK_PYTHON"] = "python3"
sc = SparkContext.getOrCreate()

# Build the training set: CLASS_A points are the positive class (label 1),
# CLASS_B points are the negative class (label 0).
labeledPoints = [LabeledPoint(1, pt) for pt in CLASS_A] + [LabeledPoint(0, pt) for pt in CLASS_B]
labeledPointRDD = sc.parallelize(labeledPoints)
# Feature-only view of the data, used by the feature-scaling experiments below.
featuresRDD = labeledPointRDD.map(lambda pt: pt.features)

# matrix = RowMatrix(featuresRDD)
# summary = matrix.computeColumnSummaryStatistics()
# print(summary.mean())
# print(summary.variance())
#
# mean = sc.broadcast(summary.mean())
# variance = sc.broadcast(summary.variance())
# import numpy as np
# # Scale features to zero mean / unit variance (standardize)
# rdd2 = featuresRDD.map(lambda feature: feature - mean.value).map(lambda feature: feature/np.sqrt(variance.value))
# summary = RowMatrix(rdd2).computeColumnSummaryStatistics()
# print(summary.mean())
# print(summary.variance())

# model = LogisticRegressionWithSGD.train(labeledPointRDD, iterations=3, step=1.0, regType="l1")
# metrics = BinaryClassificationMetrics(labeledPointRDD.map(lambda pt: (float(model.predict(pt.features)), pt.label)))
# print(metrics.areaUnderROC)

# Train a decision-tree classifier; an empty categoricalFeaturesInfo dict
# tells MLlib to treat every feature as continuous.
model = DecisionTree.trainClassifier(labeledPointRDD, numClasses=2, categoricalFeaturesInfo={})

# In the Python API, tree-model predict() cannot be called inside an RDD
# transformation, but it CAN be applied to a whole RDD of feature vectors.
# Do that and zip with the labels instead of collect()ing the dataset to the
# driver, which would not scale beyond toy data.
predictions = model.predict(labeledPointRDD.map(lambda pt: pt.features))
scoreAndLabels = predictions.map(float).zip(labeledPointRDD.map(lambda pt: pt.label))
metrics = BinaryClassificationMetrics(scoreAndLabels)
# NOTE(review): this is training-set AUC (no train/test split), so it is an
# optimistic estimate of generalization performance.
print(metrics.areaUnderROC)