from pyspark import SparkContext
from pyspark.mllib.regression import LinearRegressionWithSGD, LabeledPoint
import os
from matplotlib import pyplot as plt

from pyspark.mllib.tree import DecisionTree

# Tell PySpark to launch its worker processes with the python3 interpreter.
os.environ['PYSPARK_PYTHON'] = "python3"

# Training data: (x, y) pairs for a one-feature regression problem.
# x (first element) is the single input feature; y (second element) is the target label.
points = [(1.1, 0.77), (0.9, 1.43), (1.77, 1.53), (2.87, 1.5), (2.2, 1.17), (2.4, 2.3), (1.63, 2.6), (3.13, 2.97),
          (3.2, 2.13), (4.2, 2.93), (4.03, 3.93), (2.93, 3.93), (2.43, 3.17), (3.67, 3.4), (4.57, 3.4), (4.17, 4.37),
          (3.63, 2.13), (4.23, 2.23), (3.87, 2.7), (3.67, 4.2), (2.73, 2.93), (1.73, 2.2), (2.5, 2.33), (3.27, 2.7),
          (2.57, 1.33)]

# Reuse the active SparkContext if one exists; otherwise start a new one.
sc = SparkContext.getOrCreate()


def _to_labeled_point(xy):
    """Convert an (x, y) tuple into LabeledPoint(label=y, features=(x,))."""
    return LabeledPoint(xy[1], (xy[0],))


# Distribute the points and fit a decision-tree regressor with default
# settings (the empty dict declares no categorical features).
labeledPoints = sc.parallelize(points).map(_to_labeled_point)
model = DecisionTree.trainRegressor(labeledPoints, {})


def squared_error(pt):
    """Return the squared difference for a (predicted, actual) pair."""
    predicted, actual = pt
    return (predicted - actual) ** 2


def abs_error(pt):
    """Return the absolute difference for a (predicted, actual) pair.

    Uses the builtin abs() instead of the original per-call
    `import numpy` — the result is numerically identical for these
    scalar inputs, and this avoids paying import-lookup overhead on
    every record plus an unnecessary third-party dependency.
    """
    predicted, actual = pt
    return abs(predicted - actual)


# Collect the training points to the driver and pair each tree
# prediction with its true label.  (Name kept as-is: the commented-out
# plotting code below refers to it.)
predicat_values = [(model.predict(lp.features), lp.label) for lp in labeledPoints.collect()]

rdd = sc.parallelize(predicat_values)

# Mean squared error (MSE)
print(rdd.map(squared_error).mean())

# Mean absolute error (MAE)
print(rdd.map(abs_error).mean())
# X = [x for x, _ in points]
# Y = [y for _, y in points]
# predicat_values = [model.predict((x,)) for x in X]
#
# plt.scatter(X, Y, color="red")
# plt.scatter(X, predicat_values, color="blue")
# plt.show()
# print(model.depth(), model.numNodes())
